Example No. 1
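// warmUp pre-warms whichever backend clients are configured (mongodb, memcache,
// mysql, proxy, redis); mongodb and memcache are warmed up concurrently.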
func (this *FunServantImpl) warmUp() {
	log.Debug("warming up...")

	if this.mg != nil {
		go this.mg.Warmup()
	}

	if this.mc != nil {
		go this.mc.Warmup()
	}

	if this.my != nil {
		this.my.Warmup()
	}

	if this.proxy != nil {
		this.proxy.Warmup()
	}

	if this.rd != nil {
		this.rd.Warmup()
	}

	log.Debug("warmup done")
}
Example No. 2
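// wsReadPump services the websocket read side: it refreshes the read deadline on
// pongs and, on any read error, closes clientGone so the other goroutines know
// the client is gone.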
func (this *subServer) wsReadPump(clientGone chan struct{}, ws *websocket.Conn) {
	ws.SetReadLimit(this.wsReadLimit)
	ws.SetReadDeadline(time.Now().Add(this.wsPongWait))
	ws.SetPongHandler(func(string) error {
		ws.SetReadDeadline(time.Now().Add(this.wsPongWait))
		return nil
	})

	// if kateway shuts down while there are open ws conns, the shutdown will
	// wait 1m: this.subServer.wsPongWait
	for {
		_, message, err := ws.ReadMessage()
		if err != nil {
			if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway) {
				log.Warn("%s: %v", ws.RemoteAddr(), err)
			} else {
				log.Debug("%s: %v", ws.RemoteAddr(), err)
			}

			close(clientGone)
			break
		}

		log.Debug("ws[%s] read: %s", ws.RemoteAddr(), string(message))
	}
}
Example No. 3
File: mg.go Project: justinblah/fae
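// MgInsert decodes doc as JSON and inserts it into the given mongodb
// kind/shard/table, returning true on success.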
func (this *FunServantImpl) MgInsert(ctx *rpc.Context,
	kind string, table string, shardId int32,
	doc []byte, options []byte) (r bool, intError error) {
	log.Debug("%s %d %s %s %v %s", kind, shardId, table,
		string(doc), doc, string(options))

	var sess *mongo.Session
	sess, intError = this.mongoSession(kind, shardId)
	if intError != nil {
		return
	}

	var bdoc = bson.M{}
	json.Unmarshal(doc, &bdoc)
	log.Debug("%+v", bdoc)

	err := sess.DB().C(table).Insert(bdoc)
	if err == nil {
		r = true
	} else {
		log.Error(err)
	}
	sess.Recyle(&err)

	return
}
Example No. 4
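// LaunchHttpServer starts the global REST API server on listenAddr and, when
// debugAddr is non-empty, a debug/pprof HTTP server alongside it.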
func LaunchHttpServer(listenAddr string, debugAddr string) (err error) {
	if httpApi != nil {
		return httpDupLaunch
	}

	httpApi = new(httpRestApi)
	httpApi.httpPaths = make([]string, 0, 10)
	httpApi.httpRouter = mux.NewRouter()
	httpApi.httpServer = &http.Server{
		Addr:    listenAddr,
		Handler: httpApi.httpRouter,
	}

	httpApi.httpListener, err = net.Listen("tcp", httpApi.httpServer.Addr)
	if err != nil {
		httpApi = nil
		return err
	}

	if debugAddr != "" {
		log.Debug("HTTP serving at %s with pprof at %s", listenAddr, debugAddr)
	} else {
		log.Debug("HTTP serving at %s", listenAddr)
	}

	go httpApi.httpServer.Serve(httpApi.httpListener)
	if debugAddr != "" {
		go http.ListenAndServe(debugAddr, nil)
	}

	return nil
}
Example No. 5
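// handleDueJobs consumes due jobs: each job is removed from the job table,
// published to kafka (falling back to hinted handoff), then archived; if both
// pub and handoff fail, it is reinjected into mysql.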
// TODO batch DELETE/INSERT for better performance.
func (this *JobExecutor) handleDueJobs(wg *sync.WaitGroup) {
	defer wg.Done()

	var (
		// zabbix maintains an in-memory delete queue
		// delete from history_uint where itemid=? and clock<min_clock
		sqlDeleteJob = fmt.Sprintf("DELETE FROM %s WHERE job_id=?", this.table)

		sqlInsertArchive = fmt.Sprintf("INSERT INTO %s(job_id,payload,ctime,due_time,etime,actor_id) VALUES(?,?,?,?,?,?)",
			jm.HistoryTable(this.topic))
		sqlReinject = fmt.Sprintf("INSERT INTO %s(job_id, payload, ctime, due_time) VALUES(?,?,?,?)", this.table)
	)
	for {
		select {
		case <-this.stopper:
			return

		case item := <-this.dueJobs:
			now := time.Now()
			affectedRows, _, err := this.mc.Exec(jm.AppPool, this.table, this.aid, sqlDeleteJob, item.JobId)
			if err != nil {
				log.Error("%s: %s", this.ident, err)
				continue
			}
			if affectedRows == 0 {
				// 2 possibilities:
				// - client Cancel job wins
				// - this handler is too slow and the job was fetched twice within one tick
				continue
			}

			log.Debug("%s land %s", this.ident, item)
			_, _, err = store.DefaultPubStore.SyncPub(this.cluster, this.topic, nil, item.Payload)
			if err != nil {
				err = hh.Default.Append(this.cluster, this.topic, nil, item.Payload)
			}
			if err != nil {
				// pub fails and hinted handoff also fails: reinject job back to mysql
				log.Error("%s: %s", this.ident, err)
				this.mc.Exec(jm.AppPool, this.table, this.aid, sqlReinject,
					item.JobId, item.Payload, item.Ctime, item.DueTime)
				continue
			}

			log.Debug("%s fired %s", this.ident, item)
			this.auditor.Trace(item.String())

			// mv job to archive table
			_, _, err = this.mc.Exec(jm.AppPool, this.table, this.aid, sqlInsertArchive,
				item.JobId, item.Payload, item.Ctime, item.DueTime, now.Unix(), this.parentId)
			if err != nil {
				log.Error("%s: %s", this.ident, err)
			} else {
				log.Debug("%s archived %s", this.ident, item)
			}

		}
	}
}
Example No. 6
func (this *ConfigProxy) LoadConfig(selfAddr string, cf *conf.Conf) {
	if selfAddr == "" {
		panic("proxy self addr unknown")
	}
	this.PoolCapacity = cf.Int("pool_capacity", 10)
	this.IdleTimeout = cf.Duration("idle_timeout", 0)
	this.IoTimeout = cf.Duration("io_timeout", time.Second*10)
	this.BorrowTimeout = cf.Duration("borrow_timeout", time.Second*10)
	this.DiagnosticInterval = cf.Duration("diagnostic_interval", time.Second*5)
	this.TcpNoDelay = cf.Bool("tcp_nodelay", true)
	this.BufferSize = cf.Int("buffer_size", 4<<10)
	this.SelfAddr = selfAddr
	parts := strings.SplitN(this.SelfAddr, ":", 2)
	if parts[0] == "" {
		// auto-detect the local ip when self_addr is like ":9001"
		ips, _ := ip.LocalIpv4Addrs()
		if len(ips) == 0 {
			panic("cannot get local ip address")
		}

		this.SelfAddr = ips[0] + ":" + parts[1]
	}

	log.Debug("proxy conf: %+v", *this)
}
Example No. 7
func (this *ConfigMongodb) LoadConfig(cf *conf.Conf) {
	this.ShardBaseNum = cf.Int("shard_base_num", 100000)
	this.DebugProtocol = cf.Bool("debug_protocol", false)
	this.DebugHeartbeat = cf.Bool("debug_heartbeat", false)
	this.ShardStrategy = cf.String("shard_strategy", "legacy")
	this.ConnectTimeout = cf.Duration("connect_timeout", 4*time.Second)
	this.IoTimeout = cf.Duration("io_timeout", 30*time.Second)
	this.MaxIdleConnsPerServer = cf.Int("max_idle_conns_per_server", 2)
	this.MaxConnsPerServer = cf.Int("max_conns_per_server",
		this.MaxIdleConnsPerServer*5)
	this.HeartbeatInterval = cf.Int("heartbeat_interval", 120)
	section, err := cf.Section("breaker")
	if err == nil {
		this.Breaker.loadConfig(section)
	}
	this.Servers = make(map[string]*ConfigMongodbServer)
	for i := 0; i < len(cf.List("servers", nil)); i++ {
		section, err := cf.Section(fmt.Sprintf("servers[%d]", i))
		if err != nil {
			panic(err)
		}

		server := new(ConfigMongodbServer)
		server.ShardBaseNum = this.ShardBaseNum
		server.loadConfig(section)
		this.Servers[server.Pool] = server
	}

	log.Debug("mongodb conf: %+v", *this)
}
Example No. 8
// consume the __consumer_offsets topic and process each message to extract consumer offsets
func (this *ZkCluster) processConsumerOffsetsMessage(msg *sarama.ConsumerMessage) {
	var keyver, valver uint16
	var partition uint32
	var offset, timestamp uint64

	buf := bytes.NewBuffer(msg.Key)
	err := binary.Read(buf, binary.BigEndian, &keyver)
	if (err != nil) || ((keyver != 0) && (keyver != 1)) {
		log.Warn("Failed to decode %s:%v offset %v: keyver", msg.Topic, msg.Partition, msg.Offset)
		return
	}
	group, err := readString(buf)
	if err != nil {
		log.Warn("Failed to decode %s:%v offset %v: group", msg.Topic, msg.Partition, msg.Offset)
		return
	}
	topic, err := readString(buf)
	if err != nil {
		log.Warn("Failed to decode %s:%v offset %v: topic", msg.Topic, msg.Partition, msg.Offset)
		return
	}
	err = binary.Read(buf, binary.BigEndian, &partition)
	if err != nil {
		log.Warn("Failed to decode %s:%v offset %v: partition", msg.Topic, msg.Partition, msg.Offset)
		return
	}

	buf = bytes.NewBuffer(msg.Value)
	err = binary.Read(buf, binary.BigEndian, &valver)
	if (err != nil) || ((valver != 0) && (valver != 1)) {
		log.Warn("Failed to decode %s:%v offset %v: valver", msg.Topic, msg.Partition, msg.Offset)
		return
	}
	err = binary.Read(buf, binary.BigEndian, &offset)
	if err != nil {
		log.Warn("Failed to decode %s:%v offset %v: offset", msg.Topic, msg.Partition, msg.Offset)
		return
	}
	_, err = readString(buf)
	if err != nil {
		log.Warn("Failed to decode %s:%v offset %v: metadata", msg.Topic, msg.Partition, msg.Offset)
		return
	}
	err = binary.Read(buf, binary.BigEndian, &timestamp)
	if err != nil {
		log.Warn("Failed to decode %s:%v offset %v: timestamp", msg.Topic, msg.Partition, msg.Offset)
		return
	}

	partitionOffset := &PartitionOffset{
		Cluster:   this.Name(),
		Topic:     topic,
		Partition: int32(partition),
		Group:     group,
		Timestamp: int64(timestamp),
		Offset:    int64(offset),
	}
	log.Debug("%+v", partitionOffset)
	return
}
Example No. 9
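// commitOffsets flushes all acked consumer offsets to zookeeper, marking
// committed (or permanently rejected) slots empty with -1.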
func (this *subServer) commitOffsets() {
	for cluster, clusterTopic := range this.ackedOffsets {
		zkcluster := meta.Default.ZkCluster(cluster)

		for topic, groupPartition := range clusterTopic {
			for group, partitionOffset := range groupPartition {
				for partition, offset := range partitionOffset {
					if offset == -1 {
						// this slot is empty
						continue
					}

					log.Debug("cluster[%s] group[%s] commit offset {T:%s/%d O:%d}", cluster, group, topic, partition, offset)

					if err := zkcluster.ResetConsumerGroupOffset(topic, group, strconv.Itoa(partition), offset); err != nil {
						log.Error("cluster[%s] group[%s] commit offset {T:%s/%d O:%d} %v", cluster, group, topic, partition, offset, err)

						if err == zk.ErrNoNode {
							// invalid offset commit request, will not retry
							this.ackedOffsets[cluster][topic][group][partition] = -1
						}
					} else {
						// mark this slot empty
						this.ackedOffsets[cluster][topic][group][partition] = -1
					}
				}
			}
		}
	}

}
Example No. 10
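// newMysql builds a mysql client guarded by a consecutive-failure circuit breaker;
// when maxStmtCached > 0 it also keeps an LRU cache of prepared statements that
// are closed on eviction.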
func newMysql(dsn string, maxStmtCached int, bc *config.ConfigBreaker) *mysql {
	this := new(mysql)
	if bc == nil {
		bc = &config.ConfigBreaker{
			FailureAllowance: 5,
			RetryTimeout:     time.Second * 10,
		}
	}
	this.dsn = dsn
	this.breaker = &breaker.Consecutive{
		FailureAllowance: bc.FailureAllowance,
		RetryTimeout:     bc.RetryTimeout}
	if maxStmtCached > 0 {
		this.stmtsStore = cache.NewLruCache(maxStmtCached)
		this.stmtsStore.OnEvicted = func(key cache.Key, value interface{}) {
			query := key.(string)
			stmt := value.(*sql.Stmt)
			stmt.Close()

			log.Debug("[%s] stmt[%s] closed", this.dsn, query)
		}
	}

	return this
}
Example No. 11
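// diagnose pings every registered broker of every cluster: since kafka has no
// ping API, a Topics() request on a fresh sarama client is used as the probe.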
func (this *Ping) diagnose() {
	this.zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		registeredBrokers := zkcluster.RegisteredInfo().Roster
		for _, broker := range registeredBrokers {
			log.Debug("ping %s", broker.Addr())

			kfk, err := sarama.NewClient([]string{broker.Addr()}, sarama.NewConfig())
			if err != nil {
				log.Error("%25s %30s %s", broker.Addr(), broker.NamedAddr(), color.Red(err.Error()))

				continue
			}

			_, err = kfk.Topics() // kafka doesn't provide ping, so use Topics() as ping
			if err != nil {
				log.Error("%25s %30s %s", broker.Addr(), broker.NamedAddr(), color.Red(err.Error()))
			} else {
				if !this.problematicMode {
					log.Info("%25s %30s %s", broker.Addr(), broker.NamedAddr(), color.Green("ok"))
				}
			}
			kfk.Close()
		}
	})

}
Example No. 12
func (this *ConfigMemcache) LoadConfig(cf *conf.Conf) {
	this.Servers = make(map[string]*ConfigMemcacheServer)
	this.HashStrategy = cf.String("hash_strategy", "standard")
	this.Timeout = cf.Duration("timeout", 4*time.Second)
	this.ReplicaN = cf.Int("replica_num", 1)
	section, err := cf.Section("breaker")
	if err == nil {
		this.Breaker.loadConfig(section)
	}
	this.MaxIdleConnsPerServer = cf.Int("max_idle_conns_per_server", 3)
	this.MaxConnsPerServer = cf.Int("max_conns_per_server",
		this.MaxIdleConnsPerServer*10)
	for i := 0; i < len(cf.List("servers", nil)); i++ {
		section, err := cf.Section(fmt.Sprintf("servers[%d]", i))
		if err != nil {
			panic(err)
		}

		server := new(ConfigMemcacheServer)
		server.loadConfig(section)
		this.Servers[server.Address()] = server
	}

	log.Debug("memcache conf: %+v", *this)
}
Example No. 13
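// discoverPeers reads peer announcements from the multicast connection and
// refreshes each announced sender as a neighbor.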
func (this *Peer) discoverPeers() {
	defer func() {
		this.c.Close() // leave the multicast group
	}()

	var msg peerMessage
	reader := bufio.NewReader(this.c)
	for {
		// net.ListenMulticastUDP sets IP_MULTICAST_LOOP=0 by
		// default, so you never receive your own sent data
		// if you run both sender and receiver on the (logically) same IP host
		line, _, err := reader.ReadLine()
		if err != nil {
			log.Error(err)
			continue
		}

		if err := msg.unmarshal(line); err != nil {
			// Not our protocol; it may be SSDP or something else
			continue
		}

		log.Debug("received peer: %+v", msg)

		neighborIp, present := msg["ip"]
		if !present {
			log.Info("Peer msg has no 'ip'")
			continue
		}

		this.refreshNeighbor(neighborIp.(string))
	}
}
Example No. 14
// Close must be called before Recycle
func (this *syncProducerClient) Close() {
	log.Debug("cluster[%s] closing kafka sync client: %d", this.cluster, this.id)

	// will close the producer and the kafka tcp conn
	this.SyncProducer.Close()
	this.closed = true
}
Example No. 15
func (this *ConfigRedis) LoadConfig(cf *conf.Conf) {
	section, err := cf.Section("breaker")
	if err == nil {
		this.Breaker.loadConfig(section)
	}

	this.Servers = make(map[string]map[string]*ConfigRedisServer)
	for i := 0; i < len(cf.List("pools", nil)); i++ {
		section, err := cf.Section(fmt.Sprintf("pools[%d]", i))
		if err != nil {
			panic(err)
		}

		pool := section.String("name", "")
		if pool == "" {
			panic("Empty redis pool name")
		}

		this.Servers[pool] = make(map[string]*ConfigRedisServer)

		// get servers in each pool
		for j := 0; j < len(section.List("servers", nil)); j++ {
			server, err := section.Section(fmt.Sprintf("servers[%d]", j))
			if err != nil {
				panic(err)
			}

			redisServer := new(ConfigRedisServer)
			redisServer.loadConfig(server)
			this.Servers[pool][redisServer.Addr] = redisServer
		}
	}

	log.Debug("redis conf: %+v", *this)
}
Example No. 16
// loadSegments loads all in-range segments on disk
// FIXME manage q.inflights counter while loading segments
func (q *queue) loadSegments(minId uint64) (segments, error) {
	segments := []*segment{}

	files, err := ioutil.ReadDir(q.dir)
	if err != nil {
		return segments, err
	}

	for _, segment := range files {
		if segment.IsDir() || segment.Name() == cursorFile {
			continue
		}

		// segment file names are all numeric
		id, err := strconv.ParseUint(segment.Name(), 10, 64)
		if err != nil {
			log.Error("queue[%s] segment:%s %s", q.ident(), segment.Name(), err)
			continue
		}
		if id < minId {
			log.Debug("queue[%s] skip stale segment:%s", q.ident(), segment.Name())
			continue
		}

		segment, err := newSegment(id, filepath.Join(q.dir, segment.Name()), q.maxSegmentSize)
		if err != nil {
			return segments, err
		}

		segments = append(segments, segment)
	}
	return segments, nil
}
Example No. 17
// watchTopicPartitionsChange watches partition changes on a topic.
func (cg *ConsumerGroup) watchTopicPartitionsChange(topic string, stopper <-chan struct{},
	topicPartitionsChanged chan<- string, outstanding *sync.WaitGroup) {
	defer outstanding.Done()

	_, ch, err := cg.kazoo.Topic(topic).WatchPartitions()
	if err != nil {
		if err == zk.ErrNoNode {
			err = ErrInvalidTopic
		}
		log.Error("[%s/%s] topic[%s] watch partitions: %s", cg.group.Name, cg.shortID(), topic, err)
		cg.emitError(err, topic, -1)
		return
	}

	var (
		backoff    = time.Duration(5)
		maxRetries = 3
	)
	select {
	case <-cg.stopper:
		return

	case <-stopper:
		return

	case <-ch:
		// when partitions scale up, the zk node might not be completely ready, so await it becoming ready
		//
		// even if the zk node is ready, the kafka broker might not be:
		// kafka server: Request was for a topic or partition that does not exist on this broker
		// so we blindly wait: should be enough for most cases
		// in rare cases that is still not enough: imagine partitions 1->1000, which takes long
		// then just return that err to the client to retry
		time.Sleep(time.Second * backoff)
		for retries := 0; retries < maxRetries; retries++ {
			// retrieve brokers/topics/{topic}/partitions/{partition}/state and find the leader broker id
			// the new partitions state znode might not be ready yet
			if partitions, err := cg.kazoo.Topic(topic).Partitions(); err == nil {
				if _, err = retrievePartitionLeaders(partitions); err == nil {
					log.Debug("[%s/%s] topic[%s] partitions change complete", cg.group.Name, cg.shortID(), topic)
					break
				} else {
					log.Warn("[%s/%s] topic[%s] partitions change retry#%d waiting: %v", cg.group.Name, cg.shortID(), topic, retries, err)
					backoff-- // don't worry if negative
					time.Sleep(time.Second * backoff)
				}
			} else {
				log.Warn("[%s/%s] topic[%s] partitions change retry#%d waiting: %v", cg.group.Name, cg.shortID(), topic, retries, err)
				backoff--
				time.Sleep(time.Second * backoff)
			}
		}

		// safe to trigger rebalance
		select {
		case topicPartitionsChanged <- topic:
		default:
		}
	}
}
Example No. 18
func (this *ConfigLock) LoadConfig(cf *conf.Conf) {
	this.MaxItems = cf.Int("max_items", 1<<20)
	this.Expires = cf.Duration("expires", time.Second*10)

	this.enabled = true

	log.Debug("lock conf: %+v", *this)
}
Example No. 19
func loadConfig(cf *conf.Conf) {
	config.etcServers = cf.StringList("etcd_servers", nil)
	config.faeTemplateFile = cf.String("fae_template_file", "")
	config.faeTargetFile = cf.String("fae_target_file", "")
	config.maintainTemplateFile = cf.String("maintain_template_file", "")
	config.maintainTargetFile = cf.String("maintain_target_file", "")

	log.Debug("config: %+v", config)
}
Example No. 20
func (c *fastListenerConn) Close() error {
	log.Debug("%s closing conn from %s", c.Conn.LocalAddr(), c.Conn.RemoteAddr())

	err := c.Conn.Close()
	if c.gw != nil && !Options.DisableMetrics {
		c.gw.svrMetrics.ConcurrentConns.Dec(1)
	}
	return err
}
Example No. 21
func (this *Peer) killNeighbor(ip string) {
	this.Lock()
	defer this.Unlock()

	delete(this.neighbors, ip)
	this.picker.DelPeer(ip)
	log.Info("Peer[%s] killed", ip)

	log.Debug("Neighbors: %+v", this.neighbors)
}
Example No. 22
func (c *limitListenerConn) Close() error {
	log.Debug("%s[%s] closing conn from %s", c.name, c.Conn.LocalAddr(), c.Conn.RemoteAddr())

	err := c.Conn.Close()
	if c.gw != nil && !Options.DisableMetrics {
		c.gw.svrMetrics.ConcurrentConns.Dec(1)
	}
	c.releaseOnce.Do(c.release)
	return err
}
Example No. 23
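// do logs how long a servant call took: a warning for slow responses, otherwise
// debug output when profiling is enabled.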
func (this *profiler) do(name string, ctx *rpc.Context, format interface{}, args ...interface{}) {
	elapsed := time.Since(this.t1)
	if elapsed.Seconds() > 5.0 { // TODO config
		// slow response
		s := fmt.Sprintf("SLOW T=%s Q=%s X{%s} "+format, elapsed, name, this.contextInfo(ctx), args...)
		log.Warn(s)
	} else if this.on {
		s := fmt.Sprintf("T=%s Q=%s X{%s} "+format, elapsed, name, this.contextInfo(ctx), args...)
		log.Debug(s)
	}
}
Example No. 24
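// Warmup pre-opens up to MaxActive connections per redis server in every pool
// and pings each of them.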
func (this *Client) Warmup() {
	t1 := time.Now()
	for poolName, pool := range this.conns {
		for addr, conn := range pool {
			log.Debug("redis pool[%s] connecting: %s", poolName, addr)
			for i := 0; i < this.cf.Servers[poolName][addr].MaxActive; i++ {
				c := conn.Get()
				if c.Err() != nil {
					log.Error("redis[%s][%s]: %v", poolName, addr, c.Err())
					continue
				}

				c.Do("PING")
				defer c.Close()
			}
		}
	}
	log.Debug("Redis warmup within %s: %+v",
		time.Since(t1), this.selectors)
}
Example No. 25
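// Close shuts the consumer group down exactly once: it stops all sub-goroutines,
// closes the offset manager, deregisters the instance, closes the Sarama consumer
// and releases the zookeeper connection.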
func (cg *ConsumerGroup) Close() error {
	shutdownError := AlreadyClosing
	cg.singleShutdown.Do(func() {
		log.Debug("[%s/%s] closing...", cg.group.Name, cg.shortID())

		shutdownError = nil

		close(cg.stopper) // notify all sub-goroutines to stop
		cg.wg.Wait()      // await cg.consumeTopics() done

		if err := cg.offsetManager.Close(); err != nil {
			// e.g. Not all offsets were committed before shutdown was completed
			log.Error("[%s/%s] closing offset manager: %s", cg.group.Name, cg.shortID(), err)
		}

		if shutdownError = cg.instance.Deregister(); shutdownError != nil {
			log.Error("[%s/%s] de-register cg instance: %s", cg.group.Name, cg.shortID(), shutdownError)
		} else {
			log.Debug("[%s/%s] de-registered cg instance", cg.group.Name, cg.shortID())
		}

		if cg.consumer != nil {
			if shutdownError = cg.consumer.Close(); shutdownError != nil {
				log.Error("[%s/%s] closing Sarama consumer: %v", cg.group.Name, cg.shortID(), shutdownError)
			}
		}

		close(cg.messages)
		close(cg.errors)

		log.Debug("[%s/%s] closed", cg.group.Name, cg.shortID())

		cg.instance = nil
		cg.kazoo.Close()
		if cg.cacher != nil {
			// nothing? TODO gc it quickly
		}
	})

	return shutdownError
}
Example No. 26
func (this *ConfigMemcacheServer) loadConfig(section *conf.Conf) {
	this.host = section.String("host", "")
	if this.host == "" {
		panic("Empty memcache server host")
	}
	this.hort = section.String("port", "")
	if this.hort == "" {
		panic("Empty memcache server port")
	}

	log.Debug("memcache server: %+v", *this)
}
Example No. 27
func (this *Peer) refreshNeighbor(ip string) {
	this.Lock()
	defer this.Unlock()

	if _, present := this.neighbors[ip]; !present {
		log.Info("Peer[%s] joined", ip)
		this.picker.AddPeer(ip)
	}

	this.neighbors[ip] = time.Now()

	log.Debug("Neighbors: %+v", this.neighbors)
}
Example No. 28
func (this *configRpc) loadConfig(section *conf.Conf) {
	this.listenAddr = section.String("listen_addr", "")
	if this.listenAddr == "" {
		panic("Empty listen_addr")
	}

	this.clientSlowThreshold = section.Float("client_slow_threshold", 5)
	this.callSlowThreshold = section.Float("call_slow_threshold", 5)
	this.clientTimeout = time.Duration(section.Int("client_timeout", 0)) * time.Second
	this.framed = section.Bool("framed", false)
	this.protocol = section.String("protocol", "binary")

	log.Debug("rpc: %+v", *this)
}
Example No. 29
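// Query runs a query through the circuit breaker, preparing and caching the
// statement first when the statement cache is enabled.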
func (this *mysql) Query(query string, args ...interface{}) (rows *sql.Rows,
	err error) {
	if this.db == nil {
		return nil, ErrNotOpen
	}
	if this.breaker.Open() {
		return nil, ErrCircuitOpen
	}

	var stmt *sql.Stmt = nil
	if this.stmtsStore != nil {
		if stmtc, present := this.stmtsStore.Get(query); present {
			stmt = stmtc.(*sql.Stmt)
		} else {
			// FIXME thundering herd
			stmt, err = this.db.Prepare(query)
			if err != nil {
				if this.isSystemError(err) {
					log.Warn("mysql prepare breaks: %s", err.Error())
					this.breaker.Fail()
				}

				return nil, err
			}

			this.mutex.Lock()
			this.stmtsStore.Set(query, stmt)
			this.mutex.Unlock()
			log.Debug("[%s] stmt[%s] open", this.dsn, query)
		}
	}

	// Under the hood, db.Query() actually prepares, executes, and closes
	// a prepared statement. That's three round-trips to the database.
	if stmt != nil {
		rows, err = stmt.Query(args...)
	} else {
		rows, err = this.db.Query(query, args...)
	}
	if err != nil {
		if this.isSystemError(err) {
			log.Warn("mysql query breaks: %s", err.Error())
			this.breaker.Fail()
		}
	} else {
		this.breaker.Succeed()
	}

	return
}
Example No. 30
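// Accept wraps every accepted connection so total and concurrent connection
// metrics are tracked.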
func (l *fastListener) Accept() (net.Conn, error) {
	c, err := l.Listener.Accept()
	if err != nil {
		return nil, err
	}

	log.Debug("%s new conn from %s", l.Listener.Addr(), c.RemoteAddr())
	if l.gw != nil && !Options.DisableMetrics {
		l.gw.svrMetrics.TotalConns.Inc(1)
		l.gw.svrMetrics.ConcurrentConns.Inc(1)
	}

	return &fastListenerConn{c, l.gw}, nil
}