Example #1
// LSBaseQuery builds the base query that both LSCount and LSStat share
func LSBaseQuery(now time.Time, indexRoot string, l LogstashElasticHosts, keystring string, filter, sduration, eduration string, size int) (*LogstashRequest, error) {
	start, err := opentsdb.ParseDuration(sduration)
	if err != nil {
		return nil, err
	}
	var end opentsdb.Duration
	if eduration != "" {
		end, err = opentsdb.ParseDuration(eduration)
		if err != nil {
			return nil, err
		}
	}
	st := now.Add(time.Duration(-start))
	en := now.Add(time.Duration(-end))
	r := LogstashRequest{
		IndexRoot: indexRoot,
		Start:     &st,
		End:       &en,
		Source:    elastic.NewSearchSource().Size(size),
	}
	tf := elastic.NewRangeFilter("@timestamp").Gte(st).Lte(en)
	filtered := elastic.NewFilteredQuery(tf)
	r.KeyMatches, err = ProcessLSKeys(keystring, filter, &filtered)
	if err != nil {
		return nil, err
	}
	r.Source = r.Source.Query(filtered)
	return &r, nil
}
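Note: the core move here is converting two relative offsets into an absolute [start, end] window anchored at now. A minimal sketch of the same idea using only the standard library (time.ParseDuration stands in for opentsdb.ParseDuration; all names are illustrative):

	// timeWindow turns relative offsets such as "1h" and "5m" into absolute times.
	func timeWindow(now time.Time, sduration, eduration string) (time.Time, time.Time, error) {
		start, err := time.ParseDuration(sduration)
		if err != nil {
			return time.Time{}, time.Time{}, err
		}
		end, err := time.ParseDuration(eduration)
		if err != nil {
			return time.Time{}, time.Time{}, err
		}
		return now.Add(-start), now.Add(-end), nil // e.g. one hour ago .. five minutes ago
	}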
Example #2
// Generates a report with the time taken by each operation in the domain DAO. For
// more realistic values it repeats the same operation over the same amount of data a
// number of times and averages the durations. After some measurements, indexes gave
// roughly 80% better performance; another good improvement was creating and removing
// many objects at once using goroutines.
func domainDAOPerformanceReport(reportFile string, domainDAO dao.DomainDAO) {
	// Report header
	report := " #       | Total            | Insert           | Find             | Remove\n" +
		"------------------------------------------------------------------------------------\n"

	// Report variables
	averageTurns := 5
	scale := []int{10, 50, 100, 500, 1000, 5000,
		10000, 50000, 100000, 500000, 1000000, 5000000}

	for _, numberOfItems := range scale {
		var totalDuration, insertDuration, queryDuration, removeDuration time.Duration

		for i := 0; i < averageTurns; i++ {
			utils.Println(fmt.Sprintf("Generating report - scale %d - turn %d", numberOfItems, i+1))
			totalDurationTmp, insertDurationTmp, queryDurationTmp, removeDurationTmp :=
				calculateDomainDAODurations(domainDAO, numberOfItems)

			totalDuration += totalDurationTmp
			insertDuration += insertDurationTmp
			queryDuration += queryDurationTmp
			removeDuration += removeDurationTmp
		}

		report += fmt.Sprintf("% -8d | % 16s | % 16s | % 16s | % 16s\n",
			numberOfItems,
			time.Duration(int64(totalDuration)/int64(averageTurns)).String(),
			time.Duration(int64(insertDuration)/int64(averageTurns)).String(),
			time.Duration(int64(queryDuration)/int64(averageTurns)).String(),
			time.Duration(int64(removeDuration)/int64(averageTurns)).String(),
		)
	}

	utils.WriteReport(reportFile, report)
}
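Note: time.Duration is an integer count of nanoseconds, so the averaging above can also divide the summed Duration directly instead of converting through int64. A minimal sketch (an illustrative helper, not part of the project):

	func averageDuration(samples []time.Duration) time.Duration {
		if len(samples) == 0 {
			return 0
		}
		var total time.Duration
		for _, d := range samples {
			total += d
		}
		return total / time.Duration(len(samples))
	}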
Example #3
func ParseTimeInterval(source string) (time.Duration, error) {
	matches := intervalRegex.FindStringSubmatch(strings.ToLower(source))

	if matches == nil {
		return 0, errors.New("Invalid time interval " + source)
	}

	val, err := strconv.Atoi(matches[1])

	if err != nil {
		return 0, err
	}

	switch matches[2] {
	case "s":
		return time.Duration(val) * time.Second, nil

	case "m":
		return time.Duration(val) * time.Second * 60, nil

	case "h":
		return time.Duration(val) * time.Second * 60 * 60, nil

	case "d":
		return time.Duration(val) * time.Second * 60 * 60 * 24, nil

	case "w":
		return time.Duration(val) * time.Second * 60 * 60 * 24 * 7, nil

	default:
		return 0, errors.New("Invalid time interval " + source)
	}
}
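A hedged usage sketch, assuming intervalRegex accepts forms like "30s", "90m", "2h", "1d" and "1w":

	d, err := ParseTimeInterval("90m")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d) // 1h30m0s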
Example #4
// setDefaults sets default values for any Worker fields that are
// uninitialized.
func (w *Worker) setDefaults() {
	if w.WorkerID == "" {
		// May as well use a UUID here, "it's what we've always done"
		w.WorkerID = uuid.NewV4().String()
	}

	if w.Concurrency == 0 {
		w.Concurrency = runtime.NumCPU()
	}

	if w.PollInterval == time.Duration(0) {
		w.PollInterval = time.Duration(1) * time.Second
	}

	if w.HeartbeatInterval == time.Duration(0) {
		w.HeartbeatInterval = time.Duration(15) * time.Second
	}

	if w.MaxAttempts == 0 {
		w.MaxAttempts = 100
	}

	if w.Clock == nil {
		w.Clock = clock.New()
	}
}
Example #5
// Ping sends a ping frame across the connection and
// returns the response time
func (s *Connection) Ping() (time.Duration, error) {
	s.pingIdLock.Lock()
	pid := s.pingId
	if s.pingId > 0x7ffffffe {
		s.pingId = s.pingId - 0x7ffffffe
	} else {
		s.pingId = s.pingId + 2
	}
	s.pingIdLock.Unlock()
	pingChan := make(chan error)
	s.pingChans[pid] = pingChan
	defer delete(s.pingChans, pid)

	frame := &spdy.PingFrame{Id: pid}
	startTime := time.Now()
	writeErr := s.framer.WriteFrame(frame)
	if writeErr != nil {
		return time.Duration(0), writeErr
	}
	select {
	case <-s.closeChan:
		return time.Duration(0), errors.New("connection closed")
	case err, ok := <-pingChan:
		if ok && err != nil {
			return time.Duration(0), err
		}
		break
	}
	return time.Since(startTime), nil
}
Example #6
func (d *UDPClient) Start(uri *url.URL) error {
	d.url = uri
	d.stop = make(chan struct{})

	params := uri.Query()
	// The address must not have a port, as otherwise both announce and lookup
	// sockets would try to bind to the same port.
	addr, err := net.ResolveUDPAddr(d.url.Scheme, params.Get("listenaddress")+":0")
	if err != nil {
		return err
	}
	d.listenAddress = addr

	broadcastSeconds, err := strconv.ParseUint(params.Get("broadcast"), 0, 0)
	if err != nil {
		d.globalBroadcastInterval = DefaultGlobalBroadcastInterval
	} else {
		d.globalBroadcastInterval = time.Duration(broadcastSeconds) * time.Second
	}

	retrySeconds, err := strconv.ParseUint(params.Get("retry"), 0, 0)
	if err != nil {
		d.errorRetryInterval = DefaultErrorRetryInternval
	} else {
		d.errorRetryInterval = time.Duration(retrySeconds) * time.Second
	}

	d.wg.Add(1)
	go d.broadcast()
	return nil
}
Example #7
// serve starts serving the provided http.Handler using security settings derived from the MasterConfig
func (c *MasterConfig) serve(handler http.Handler, extra []string) {
	timeout := c.Options.ServingInfo.RequestTimeoutSeconds
	if timeout == -1 {
		timeout = 0
	}

	server := &http.Server{
		Addr:           c.Options.ServingInfo.BindAddress,
		Handler:        handler,
		ReadTimeout:    time.Duration(timeout) * time.Second,
		WriteTimeout:   time.Duration(timeout) * time.Second,
		MaxHeaderBytes: 1 << 20,
	}

	go util.Forever(func() {
		for _, s := range extra {
			glog.Infof(s, c.Options.ServingInfo.BindAddress)
		}
		if c.TLS {
			server.TLSConfig = &tls.Config{
				// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
				MinVersion: tls.VersionTLS10,
				// Populate PeerCertificates in requests, but don't reject connections without certificates
				// This allows certificates to be validated by authenticators, while still allowing other auth types
				ClientAuth: tls.RequestClientCert,
				ClientCAs:  c.ClientCAs,
			}
			glog.Fatal(cmdutil.ListenAndServeTLS(server, c.Options.ServingInfo.BindNetwork, c.Options.ServingInfo.ServerCert.CertFile, c.Options.ServingInfo.ServerCert.KeyFile))
		} else {
			glog.Fatal(server.ListenAndServe())
		}
	}, 0)
}
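Note: the -1 sentinel is mapped to 0 first because, for http.Server, a ReadTimeout or WriteTimeout of zero means no timeout at all, so time.Duration(0) * time.Second disables the limit rather than timing every request out immediately. A tiny illustration with assumed values:

	srv := &http.Server{Addr: ":8443", ReadTimeout: 0, WriteTimeout: 30 * time.Second}
	_ = srv // zero Duration: read timeout disabled; writes limited to 30s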
Example #8
// Handles incoming requests for both TCP and UDP
func handleGraphiteTextProtocol(t *trTransceiver, conn net.Conn, timeout int) {

	defer conn.Close() // decrements tcpWg

	if timeout != 0 {
		conn.SetDeadline(time.Now().Add(time.Duration(timeout) * time.Second))
	}

	// We use the Scanner because it has a MaxScanTokenSize of 64K

	connbuf := bufio.NewScanner(conn)

	for connbuf.Scan() {
		packetStr := connbuf.Text()

		if dp, err := parseGraphitePacket(packetStr); err != nil {
			log.Printf("handleGraphiteTextProtocol(): bad backet: %v")
		} else {
			t.queueDataPoint(dp)
		}

		if timeout != 0 {
			conn.SetDeadline(time.Now().Add(time.Duration(timeout) * time.Second))
		}
	}

	if err := connbuf.Err(); err != nil {
		log.Println("handleGraphiteTextProtocol(): Error reading: %v", err)
	}
}
Example #9
// run periodically probes the container.
func (w *worker) run() {
	probeTickerPeriod := time.Duration(w.spec.PeriodSeconds) * time.Second
	probeTicker := time.NewTicker(probeTickerPeriod)

	defer func() {
		// Clean up.
		probeTicker.Stop()
		if !w.containerID.IsEmpty() {
			w.resultsManager.Remove(w.containerID)
		}

		w.probeManager.removeWorker(w.pod.UID, w.container.Name, w.probeType)
	}()

	// If kubelet restarted the probes could be started in rapid succession.
	// Let the worker wait for a random portion of tickerPeriod before probing.
	time.Sleep(time.Duration(rand.Float64() * float64(probeTickerPeriod)))

probeLoop:
	for w.doProbe() {
		// Wait for next probe tick.
		select {
		case <-w.stopCh:
			break probeLoop
		case <-probeTicker.C:
			// continue
		}
	}
}
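Note: the initial sleep is the usual jitter idiom of scaling a Duration by a random fraction via float64 and back. A minimal sketch with an assumed 10s period:

	period := 10 * time.Second
	jitter := time.Duration(rand.Float64() * float64(period)) // anywhere from 0 up to ~10s
	time.Sleep(jitter)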
Example #10
File: mb.go Project: hqr/surge
func (r *gatewayB) sendata() {
	if r.flow.timeTxDone.After(Now) {
		return
	}
	for _, tioint := range r.tios {
		tio := tioint.(*TioOffset)
		tid := tio.GetID()
		if r.ack[tid] {
			continue
		}
		if tio.offset >= tio.totalbytes {
			continue
		}
		frsize := int64(configNetwork.sizeFrame)
		if tio.offset+frsize > tio.totalbytes {
			frsize = tio.totalbytes - tio.offset
		}
		newbits := frsize * 8
		if r.rb.below(newbits) {
			continue
		}
		assert(tio.GetTarget() == r.flow.to)
		ev := newRepDataEvent(r.realobject(), r.flow.to, tio, tio.offset, int(frsize))

		// transmit given the current flow's bandwidth
		d := time.Duration(newbits) * time.Second / time.Duration(r.flow.getbw())
		ok := r.Send(ev, SmethodWait)
		assert(ok)

		tio.offset += frsize
		r.flow.timeTxDone = Now.Add(d)
		r.rb.use(newbits)
		break
	}
}
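Note: the line d := time.Duration(newbits) * time.Second / time.Duration(r.flow.getbw()) appears to compute a transmit time as bits * time.Second / bandwidth-in-bits-per-second. A worked example with assumed numbers (a 9000-byte frame on a 10 Gb/s link):

	frameBytes := int64(9000)
	bwBitsPerSec := int64(10000000000) // 10 Gb/s, assumed
	d := time.Duration(frameBytes*8) * time.Second / time.Duration(bwBitsPerSec)
	fmt.Println(d) // 7.2µs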
Example #11
func (t *Telnet) Check(srv Service) (bool, error) {
	t.deadline = time.Now().Add(time.Duration(srv.Timeout) * time.Second)

	c, err := net.DialTimeout("tcp", srv.URL, t.deadline.Sub(time.Now()))
	if err != nil {
		return false, err
	}

	_, err = t.recv(c)
	if err != nil {
		return false, err
	}

	if err := t.sendAYT(c); err != nil {
		return false, err
	}

	n, err := t.recv(c)
	if err != nil {
		return false, err
	}

	if n == 0 {
		return false, errors.New("no response to AYT")
	}

	return true, nil
}
Example #12
// Converts the value to a duration in the given units
func (*duration) Call(args ...interface{}) (v interface{}, err error) {
	if len(args) != 1 && len(args) != 2 {
		return time.Duration(0), errors.New("duration expects one or two arguments duration(value, unit) where unit is optional depending on the type of value.")
	}

	getUnit := func() (time.Duration, error) {
		if len(args) != 2 {
			return 0, errors.New("duration expects unit argument for int and float values")
		}
		unit, ok := args[1].(time.Duration)
		if !ok {
			return 0, fmt.Errorf("invalid duration unit type: %T", args[1])
		}
		return unit, nil
	}
	var unit time.Duration
	switch a := args[0].(type) {
	case time.Duration:
		v = a
	case int64:
		unit, err = getUnit()
		v = time.Duration(a) * unit
	case float64:
		unit, err = getUnit()
		v = time.Duration(a * float64(unit))
	case string:
		v, err = influxql.ParseDuration(a)
		if err != nil {
			err = fmt.Errorf("invalid duration string %q", a)
		}
	default:
		err = fmt.Errorf("cannot convert %T to duration", a)
	}
	return
}
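Note: the int64 and float64 branches above convert in different orders on purpose: an integer count can be multiplied by the unit after conversion, but a float has to be multiplied before converting or the fractional part is truncated. A small illustration:

	f := 2.5
	fmt.Println(time.Duration(3) * time.Second)          // 3s
	fmt.Println(time.Duration(f * float64(time.Second))) // 2.5s
	fmt.Println(time.Duration(int64(f)) * time.Second)   // 2s -- fraction lost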
Example #13
func (p *Pool) waitAndStop() error {
	retryInterval := time.Duration(500)

	retriesLimit := int(time.Duration(60000) / retryInterval)
	retries := 0

	queueTicker := time.NewTicker(time.Millisecond * retryInterval)
	// Retry every 500ms to check if the job queue is empty
	for range queueTicker.C {
		// Check if jobQueue is empty and all workers are available
		if len(p.jobQueue) == 0 && len(p.workerPool) == p.maxWorkers {
			for _, worker := range p.workers {
				worker.Stop()
			}

			break
		}

		retries++
		if retries >= retriesLimit {
			queueTicker.Stop()

			return fmt.Errorf(fmt.Sprintf("checking queue status exceeded retry limit: %v", time.Duration(retries)*retryInterval*time.Millisecond))
		}
	}

	// Stop job queue ticker
	queueTicker.Stop()

	return nil
}
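Worth noting about the example above: time.Duration(500) by itself is 500 nanoseconds, not 500 milliseconds; the code behaves as intended only because the value is later multiplied by time.Millisecond. A behavior-equivalent sketch that keeps the unit attached from the start (not the project's code):

	retryInterval := 500 * time.Millisecond
	retriesLimit := int(time.Minute / retryInterval) // 120 checks over one minute
	queueTicker := time.NewTicker(retryInterval)     // ticks every 500ms
	_, _ = retriesLimit, queueTicker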
Example #14
func (t *Receiver) startEnquireLink(eli int) {
	t.eLTicker = time.NewTicker(time.Duration(eli) * time.Second)
	// the check delay is half of the enquire link interval
	d := time.Duration(eli/2) * time.Second
	t.eLCheckTimer = time.NewTimer(d)
	t.eLCheckTimer.Stop()

	for {
		select {
		case <-t.eLTicker.C:

			p, _ := t.EnquireLink()
			if err := t.Write(p); err != nil {
				t.Err = SmppELWriteErr
				t.Close()
				return
			}

			t.eLCheckTimer.Reset(d)
		case <-t.eLCheckTimer.C:
			t.Err = SmppELRespErr
			t.Close()
			return
		}
	}
}
Example #15
func parseTimeout(timeout ...float64) time.Duration {
	if len(timeout) == 0 {
		return time.Duration(defaultTimeout * int64(time.Second))
	} else {
		return time.Duration(timeout[0] * float64(time.Second))
	}
}
func (rn *RaftNode) startTimer() {

	rn.debug_output2("timer started..", "")
	rn.LastAlarm = time.Now()
	for {

		if rn.Sm.state == LEADER {

			if time.Now().After(rn.LastAlarm.Add(time.Duration(rn.Sm.heartbeat_time_out*1) * time.Millisecond)) {
				rn.LastAlarm = time.Now()
				rn.TimeoutCh <- TIMEOUT{}
				//rn.debug_output3("L: ", time.Now())
			}
		} else {
			//debug_output(".")

			if time.Now().After(rn.LastAlarm.Add(time.Duration(rn.Sm.election_time_out*1) * time.Millisecond)) {
				rn.LastAlarm = time.Now()
				//debug_output("timeout fired")
				rn.TimeoutCh <- TIMEOUT{}
				//rn.debug_output2("timeout fired", "")
				//rn.debug_output3("F/C :", time.Now())
			}
		}

		if rn.StopSignal {
			//rn.debug_output2("stopping..timer", "")
			return
		}

	}
}
Example #17
func hasReachDailyTweetLimit() (bool, error) {
	var from time.Time
	var to time.Time

	now := time.Now()

	if now.Hour() >= WAKE_UP_HOUR {
		from = time.Date(now.Year(), now.Month(), now.Day(), WAKE_UP_HOUR, 0, 0, 0, now.Location())
	} else {
		yesterday := now.Add(-time.Duration(24) * time.Hour)
		from = time.Date(yesterday.Year(), yesterday.Month(), yesterday.Day(), WAKE_UP_HOUR, 0, 0, 0, yesterday.Location())
	}

	if now.Hour() < GO_TO_BED_HOUR {
		to = time.Date(now.Year(), now.Month(), now.Day(), GO_TO_BED_HOUR, 0, 0, 0, now.Location())
	} else {
		tomorrow := now.Add(time.Duration(24) * time.Hour)
		to = time.Date(tomorrow.Year(), tomorrow.Month(), tomorrow.Day(), GO_TO_BED_HOUR, 0, 0, 0, tomorrow.Location())
	}

	count, err := db.GetNumberOfTweetsBetweenDates(from, to)
	if err != nil {
		return true, err
	}

	return count >= MAX_TWEET_IN_A_DAY, nil
}
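One caveat about the 24-hour arithmetic above: now.Add(-time.Duration(24) * time.Hour) moves by exactly 24 hours of elapsed time, which is not always the previous calendar day across a DST transition; AddDate counts calendar days instead.

	yesterdayByClock := time.Now().Add(-24 * time.Hour) // exactly 24h ago
	yesterdayByDate := time.Now().AddDate(0, 0, -1)     // previous calendar day
	_, _ = yesterdayByClock, yesterdayByDate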
Example #18
func join(c *cli.Context) {
	dflag := getDiscovery(c)
	if dflag == "" {
		log.Fatalf("discovery required to join a cluster. See '%s join --help'.", c.App.Name)
	}

	addr := c.String("advertise")
	if addr == "" {
		log.Fatal("missing mandatory --advertise flag")
	}
	if !checkAddrFormat(addr) {
		log.Fatal("--advertise should be of the form ip:port or hostname:port")
	}

	joinDelay, err := time.ParseDuration(c.String("delay"))
	if err != nil {
		log.Fatalf("invalid --delay: %v", err)
	}
	if joinDelay < time.Duration(0)*time.Second {
		log.Fatalf("--delay should not be a negative number")
	}

	hb, err := time.ParseDuration(c.String("heartbeat"))
	if err != nil {
		log.Fatalf("invalid --heartbeat: %v", err)
	}
	if hb < 1*time.Second {
		log.Fatal("--heartbeat should be at least one second")
	}
	ttl, err := time.ParseDuration(c.String("ttl"))
	if err != nil {
		log.Fatalf("invalid --ttl: %v", err)
	}
	if ttl <= hb {
		log.Fatal("--ttl must be strictly superior to the heartbeat value")
	}

	d, err := discovery.New(dflag, hb, ttl, getDiscoveryOpt(c))
	if err != nil {
		log.Fatal(err)
	}

	// if joinDelay is 0, no delay will be executed
	// if joinDelay is larger than 0,
	// add a random delay between 0s and joinDelay at start to avoid synchronized registration
	if joinDelay > 0 {
		r := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
		delay := time.Duration(r.Int63n(int64(joinDelay)))
		log.Infof("Add a random delay %s to avoid synchronized registration", delay)
		time.Sleep(delay)
	}

	for {
		log.WithFields(log.Fields{"addr": addr, "discovery": dflag}).Infof("Registering on the discovery service every %s...", hb)
		if err := d.Register(addr); err != nil {
			log.Error(err)
		}
		time.Sleep(hb)
	}
}
Example #19
// TODO isn't this identical to handleGraphiteTextProtocol?
func handleStatsdTextProtocol(t *trTransceiver, conn net.Conn, timeout int) {

	defer conn.Close() // decrements tcpWg

	if timeout != 0 {
		conn.SetDeadline(time.Now().Add(time.Duration(timeout) * time.Second))
	}

	// We use the Scanner because it has a MaxScanTokenSize of 64K

	connbuf := bufio.NewScanner(conn)

	for connbuf.Scan() {
		if stat, err := parseStatsdPacket(connbuf.Text()); err == nil {
			t.queueStat(stat)
		} else {
			log.Printf("parseStatsdPacket(): %v", err)
		}

		if timeout != 0 {
			conn.SetDeadline(time.Now().Add(time.Duration(timeout) * time.Second))
		}
	}

	if err := connbuf.Err(); err != nil {
		log.Println("handleStatsdTextProtocol(): Error reading: %v", err)
	}
}
Example #20
func Run() {
	r := flag.Int("r", 0, "read timeout")
	w := flag.Int("w", 0, "write timeout")

	port := CFG.Int("port")

	if port == 0 {
		port = 80
	}

	host := CFG.Str("host")
	if host == "" {
		host = "127.0.0.1"
	}

	address := fmt.Sprintf("%s:%d", host, port)
	LOGGER.Log("WebGO running", address)

	server := http.Server{
		Addr:         address,
		ReadTimeout:  time.Duration(*r) * time.Second,
		WriteTimeout: time.Duration(*w) * time.Second,
		Handler:      &app,
	}

	//server.SetKeepAlivesEnabled(false)

	err := server.ListenAndServe()
	if err != nil {
		LOGGER.Fatal(err)
	}

}
Example #21
func (s *Sentinel) connect() (err error) {
	for i, server := range s.servers {
		s.conn, err = DialTimeout(server, time.Duration(Config.ConnectTimeout)*time.Second)
		if err != nil {
			continue
		}
		s.subConn, err = DialTimeout(server, time.Duration(Config.ConnectTimeout)*time.Second)
		if err != nil {
			s.conn.Close()
			continue
		}
		s.state = connStateConnected
		s.subs = NewSubscriptions(s.subConn)
		s.subs.throwError = true
		err = s.subs.Subscribe("+sdown", "-sdown", "+odown", "-odown", "+switch-master")
		if err != nil {
			s.close()
			continue
		}
		s.servers = append(s.servers[0:i], s.servers[i+1:]...)
		s.servers = append(s.servers, server)
		go s.monitor()
		return nil
	}
	return ErrNotConnected
}
Example #22
func InitWorker(modems []*GSMModem, bufferSize, bufferLow, loaderTimeout, countOut, loaderLongTimeout int) {
	log.Println("--- InitWorker")

	bufferMaxSize = bufferSize
	bufferLowCount = bufferLow
	messageLoaderTimeout = time.Duration(loaderTimeout) * time.Minute
	messageLoaderCountout = countOut
	messageLoaderLongTimeout = time.Duration(loaderLongTimeout) * time.Minute

	messages = make(chan SMS, bufferMaxSize)
	wakeupMessageLoader = make(chan bool, 1)
	wakeupMessageLoader <- true
	messageCountSinceLastWakeup = 0
	timeOfLastWakeup = time.Now().Add((time.Duration(loaderTimeout) * -1) * time.Minute) //older time handles the cold start state of the system

	// it's important to init the messages channel before starting the modems,
	// because sending on a nil channel blocks forever

	for i := 0; i < len(modems); i++ {
		modem := modems[i]
		err := modem.Connect()
		if err != nil {
			log.Println("InitWorker: error connecting", modem.Devid, err)
			continue
		}
		go modem.ProcessMessages()
	}
	go messageLoader(bufferMaxSize, bufferLowCount)
}
Example #23
// logProgress logs block progress as an information message.  In order to
// prevent spam, it limits logging to one message every cfg.Progress seconds
// with duration and totals included.
func (bi *blockImporter) logProgress() {
	bi.receivedLogBlocks++

	now := time.Now()
	duration := now.Sub(bi.lastLogTime)
	if duration < time.Second*time.Duration(cfg.Progress) {
		return
	}

	// Truncate the duration to 10s of milliseconds.
	durationMillis := int64(duration / time.Millisecond)
	tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10)

	// Log information about new block height.
	blockStr := "blocks"
	if bi.receivedLogBlocks == 1 {
		blockStr = "block"
	}
	txStr := "transactions"
	if bi.receivedLogTx == 1 {
		txStr = "transaction"
	}
	log.Infof("Processed %d %s in the last %s (%d %s, height %d, %s)",
		bi.receivedLogBlocks, blockStr, tDuration, bi.receivedLogTx,
		txStr, bi.lastHeight, bi.lastBlockTime)

	bi.receivedLogBlocks = 0
	bi.receivedLogTx = 0
	bi.lastLogTime = now
}
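Since Go 1.9 the manual truncation to 10-millisecond steps can also be written with Duration.Truncate; a minimal equivalent sketch:

	d := 1234567890 * time.Nanosecond
	fmt.Println(d.Truncate(10 * time.Millisecond)) // 1.23s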
Example #24
func (s *StreamSuite) TestParseXML(c *C) {
	log.Println("testing ParseXML")
	b, ch := newBlock("testingParseXML", "parsexml")
	go blocks.BlockRoutine(b)
	outChan := make(chan *blocks.Msg)
	ch.AddChan <- &blocks.AddChanMsg{
		Route:   "out",
		Channel: outChan,
	}

	// where to find the xml in input
	ruleMsg := map[string]interface{}{"Path": ".data"}
	toRule := &blocks.Msg{Msg: ruleMsg, Route: "rule"}
	ch.InChan <- toRule

	queryOutChan := make(chan interface{})
	time.AfterFunc(time.Duration(1)*time.Second, func() {
		ch.QueryChan <- &blocks.QueryMsg{RespChan: queryOutChan, Route: "rule"}
	})

	var xmldata = string(`
  <?xml version="1.0" encoding="utf-8"?>
  <OdfBody DocumentType="DT_GM" Date="20130131" Time="140807885" LogicalDate="20130131" Venue="ACV" Language="ENG" FeedFlag="P" DocumentCode="AS0ACV000" Version="3" Serial="1">
    <Competition Code="OG2014">
      <Config SDelay="60" />
    </Competition>
  </OdfBody>
  `)

	time.AfterFunc(time.Duration(2)*time.Second, func() {
		xmlMsg := map[string]interface{}{"data": xmldata}
		postData := &blocks.Msg{Msg: xmlMsg, Route: "in"}
		ch.InChan <- postData
	})

	time.AfterFunc(time.Duration(5)*time.Second, func() {
		ch.QuitChan <- true
	})
	for {
		select {
		case err := <-ch.ErrChan:
			if err != nil {
				c.Errorf(err.Error())
			} else {
				return
			}
		case messageI := <-queryOutChan:
			if !reflect.DeepEqual(messageI, ruleMsg) {
				log.Println("Rule mismatch:", messageI, ruleMsg)
				c.Fail()
			}
		case messageI := <-outChan:
			message := messageI.Msg.(map[string]interface{})
			odfbody := message["OdfBody"].(map[string]interface{})
			competition := odfbody["Competition"].(map[string]interface{})
			c.Assert(odfbody["DocumentType"], Equals, "DT_GM")
			c.Assert(competition["Code"], Equals, "OG2014")
		}
	}
}
Example #25
func TestConfig_Parse(t *testing.T) {
	// Parse configuration.
	var c monitor.Config
	if _, err := toml.Decode(`
store-enabled=true
store-database="the_db"
store-retention-policy="the_rp"
store-retention-duration="1h"
store-replication-factor=1234
store-interval="10m"
`, &c); err != nil {
		t.Fatal(err)
	}

	// Validate configuration.
	if !c.StoreEnabled {
		t.Fatalf("unexpected store-enabled: %v", c.StoreEnabled)
	} else if c.StoreDatabase != "the_db" {
		t.Fatalf("unexpected store-database: %s", c.StoreDatabase)
	} else if c.StoreRetentionPolicy != "the_rp" {
		t.Fatalf("unexpected store-retention-policy: %s", c.StoreRetentionPolicy)
	} else if time.Duration(c.StoreRetentionDuration) != 1*time.Hour {
		t.Fatalf("unexpected store-retention-duration: %s", c.StoreRetentionDuration)
	} else if c.StoreReplicationFactor != 1234 {
		t.Fatalf("unexpected store-replication-factor: %d", c.StoreReplicationFactor)
	} else if time.Duration(c.StoreInterval) != 10*time.Minute {
		t.Fatalf("unexpected store-interval:  %s", c.StoreInterval)
	}
}
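Note: the time.Duration(c.StoreInterval) conversions suggest the config fields are not plain time.Duration but a wrapper type that can decode strings like "10m" from TOML. A minimal sketch of such a wrapper, assuming the TOML decoder honors encoding.TextUnmarshaler (an illustration, not necessarily influxdb's own type):

	type Duration time.Duration

	func (d *Duration) UnmarshalText(text []byte) error {
		v, err := time.ParseDuration(string(text))
		if err != nil {
			return err
		}
		*d = Duration(v)
		return nil
	}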
Example #26
func TestCache(t *testing.T) {
	foobar := "foobar"
	Set("foobar", foobar, time.Duration(10*time.Second))
	obj, err := Get("foobar")
	if err != nil {
		t.Error(err)
	}

	str, ok := obj.(string)
	if !ok {
		t.Error("Type assertions error")
	}
	if str != foobar {
		t.Error("Set/Get were not conform.")
	}
	go Set("foobar", foobar, time.Duration(1*time.Microsecond))
	time.Sleep(20 * time.Microsecond)
	obj, err = Get("foobar")
	if err == nil {
		t.Error("Time out is not working.")
	}

	go Set("foobar", foobar, time.Duration(1*time.Microsecond))
	time.Sleep(time.Minute)
	if HasKey("foobar") {
		t.Error("GC is not working.")
	}
}
Example #27
func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error {
	switch err {
	case context.Canceled:
		return ErrCanceled
	case context.DeadlineExceeded:
		curLeadElected := s.r.leadElectedTime()
		prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond)
		if start.After(prevLeadLost) && start.Before(curLeadElected) {
			return ErrTimeoutDueToLeaderFail
		}

		lead := types.ID(atomic.LoadUint64(&s.r.lead))
		switch lead {
		case types.ID(raft.None):
			// TODO: return error to specify it happens because the cluster does not have leader now
		case s.ID():
			if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) {
				return ErrTimeoutDueToConnectionLost
			}
		default:
			if !isConnectedSince(s.r.transport, start, lead) {
				return ErrTimeoutDueToConnectionLost
			}
		}

		return ErrTimeout
	default:
		return err
	}
}
Example #28
// ZMQ_LINGER: Retrieve linger period for socket shutdown
//
// Returns time.Duration(-1) for infinite
//
// See: http://api.zeromq.org/2-2:zmq-getsockopt#toc17
func (soc *Socket) GetLinger() (time.Duration, error) {
	v, err := soc.getInt(C.ZMQ_LINGER)
	if v < 0 {
		return time.Duration(-1), err
	}
	return time.Duration(v) * time.Millisecond, err
}
func initRedisQueue(option *config.Option) map[string][]*redis.Pool {
	redisPool := make(map[string][]*redis.Pool, 0)

	// create the Redis consumer connections
	for _, v := range option.QueueHostPorts {

		pool := redis.NewPool(func() (conn redis.Conn, err error) {

			conn, err = redis.DialTimeout("tcp", v.Host+":"+strconv.Itoa(v.Port),
				time.Duration(v.Timeout)*time.Second,
				time.Duration(v.Timeout)*time.Second,
				time.Duration(v.Timeout)*time.Second)

			return
		}, time.Duration(v.Timeout*2)*time.Second, v.Maxconn/2, v.Maxconn)

		pools, ok := redisPool[v.QueueName]
		if !ok {
			pools = make([]*redis.Pool, 0)
			redisPool[v.QueueName] = pools
		}
		redisPool[v.QueueName] = append(pools, pool)
	}

	return redisPool
}
Example #30
func (s *EtcdServer) LeaseRenew(id lease.LeaseID) (int64, error) {
	ttl, err := s.lessor.Renew(id)
	if err == nil {
		return ttl, nil
	}
	if err != lease.ErrNotPrimary {
		return -1, err
	}

	// renewals don't go through raft; forward to leader manually
	leader := s.cluster.Member(s.Leader())
	for i := 0; i < 5 && leader == nil; i++ {
		// wait an election
		dur := time.Duration(s.cfg.ElectionTicks) * time.Duration(s.cfg.TickMs) * time.Millisecond
		select {
		case <-time.After(dur):
			leader = s.cluster.Member(s.Leader())
		case <-s.done:
			return -1, ErrStopped
		}
	}
	if leader == nil || len(leader.PeerURLs) == 0 {
		return -1, ErrNoLeader
	}

	for _, url := range leader.PeerURLs {
		lurl := url + "/leases"
		ttl, err = leasehttp.RenewHTTP(id, lurl, s.peerRt, s.cfg.peerDialTimeout())
		if err == nil {
			break
		}
	}
	return ttl, err
}
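A worked example of the wait computation above, assuming 10 election ticks of 100ms each (typical etcd defaults): each retry waits one full election timeout.

	electionTicks := 10
	tickMs := uint(100)
	dur := time.Duration(electionTicks) * time.Duration(tickMs) * time.Millisecond
	fmt.Println(dur) // 1s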