Example 1
func SubCommonMsg() error {
	r := Redix[_SubCommonMsg]
	RedixMu[_SubCommonMsg].Lock()
	defer RedixMu[_SubCommonMsg].Unlock()

	psc := redis.PubSubConn{Conn: r}
	err := psc.PSubscribe(SubCommonMsgKey)
	if err != nil {
		return err
	}
	ch := make(chan redis.PMessage, 128)
	go func() {
		defer psc.Close()
		for {
			data := psc.Receive()
			switch m := data.(type) {
			case redis.PMessage:
				ch <- m
			case redis.Subscription:
				if m.Count == 0 {
					glog.Fatalf("Subscription: %s %s %d, %v\n", m.Kind, m.Channel, m.Count, m)
					return
				}
			case error:
				glog.Errorf("[modifypwd|redis] sub of error: %v\n", m)
				return
			}
		}
	}()
	go HandleCommonMsg(ch)
	return nil
}
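HandleCommonMsg is not shown in the example. A minimal consumer sketch (hypothetical, assuming each pattern-matched message just needs to reach application logic) could look like:

// Hypothetical consumer for the channel fed by SubCommonMsg; the real
// HandleCommonMsg belongs to the surrounding project and is not shown.
func HandleCommonMsg(ch <-chan redis.PMessage) {
	for m := range ch {
		// m.Pattern is the PSUBSCRIBE pattern, m.Channel the concrete channel.
		glog.Infof("common msg on %s (pattern %s): %s", m.Channel, m.Pattern, m.Data)
	}
}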
Example 2
// respondToPings continuously listens for pings from other worker pools and
// immediately responds with a pong. It will only return if there is an error.
func (p *Pool) respondToPings() error {
	pong := redisPool.Get()
	ping := redis.PubSubConn{Conn: redisPool.Get()}
	defer func() {
		pong.Close()
		ping.Close()
	}()
	// Subscribe to the ping key for this pool to receive pings.
	if err := ping.Subscribe(p.pingKey()); err != nil {
		return err
	}
	for {
		// Whenever we receive a ping, reply immediately with a pong by
		// publishing to the pong key for this pool.
		switch reply := ping.Receive().(type) {
		case redis.Message:
			if _, err := pong.Do("PUBLISH", p.pongKey(), 0); err != nil {
				return err
			}
		case error:
			return reply
		}
		time.Sleep(1 * time.Millisecond)
	}
}
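pingKey and pongKey are not shown; presumably they derive per-pool Redis key names from the pool id. A hypothetical sketch (the exact key format is an assumption, not the library's real scheme):

// Hypothetical key helpers, assuming the pool has a string id field.
func (p *Pool) pingKey() string { return "pools:" + p.id + ":ping" }
func (p *Pool) pongKey() string { return "pools:" + p.id + ":pong" }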
Example 3
func SubModifiedPasswd() error {
	r := Redix[_SubModifiedPasswd]
	RedixMu[_SubModifiedPasswd].Lock()
	defer RedixMu[_SubModifiedPasswd].Unlock()

	psc := redis.PubSubConn{Conn: r}
	err := psc.Subscribe(SubModifiedPasswdKey)
	if err != nil {
		return err
	}
	ch := make(chan []byte, 128)
	go func() {
		defer psc.Close()
		for {
			data := psc.Receive()
			switch n := data.(type) {
			case redis.Message:
				ch <- n.Data
			case redis.Subscription:
				if n.Count == 0 {
					glog.Fatalf("Subscription: %s %s %d, %v\n", n.Kind, n.Channel, n.Count, n)
					return
				}
			case error:
				glog.Errorf("[modifypwd|redis] sub of error: %v\n", n)
				return
			}
		}
	}()
	go HandleModifiedPasswd(ch)
	return nil
}
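The publishing side is not part of the example. A minimal counterpart sketch (hypothetical; the payload format is an assumption):

// Hypothetical publisher: notify SubModifiedPasswdKey subscribers that
// a user's password changed.
func PubModifiedPasswd(c redis.Conn, uid []byte) error {
	_, err := c.Do("PUBLISH", SubModifiedPasswdKey, uid)
	return err
}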
Example 4
// pingAndPurgeIfNeeded pings the other pool by publishing to the other
// pool's ping key. If it does not receive a pong reply within some amount
// of time, it assumes the pool is stale and purges it.
func (p *Pool) pingAndPurgeIfNeeded(other *Pool) error {
	ping := redisPool.Get()
	pong := redis.PubSubConn{Conn: redisPool.Get()}
	// Listen for pongs by subscribing to the other pool's pong key
	if err := pong.Subscribe(other.pongKey()); err != nil {
		pong.Close()
		ping.Close()
		return err
	}
	// Ping the other pool by publishing to its ping key
	if _, err := ping.Do("PUBLISH", other.pingKey(), 1); err != nil {
		pong.Close()
		ping.Close()
		return err
	}
	// Use a select statement to either receive the pong or timeout.
	// The channels are buffered so the receiving goroutine can still
	// send and exit even when the timeout fires first.
	pongChan := make(chan interface{}, 1)
	errChan := make(chan error, 1)
	go func() {
		defer func() {
			pong.Close()
			ping.Close()
		}()
		select {
		case <-p.exit:
			return
		default:
		}
		for {
			switch reply := pong.Receive().(type) {
			case redis.Message:
				// The pong was received
				pongChan <- reply
				return
			case error:
				// There was some unexpected error
				errChan <- reply
				return
			}
		}
	}()
	timeout := time.After(p.config.StaleTimeout)
	select {
	case <-pongChan:
		// The other pool responded with a pong
		return nil
	case err := <-errChan:
		// Received an error from the pubsub conn
		return err
	case <-timeout:
		// The pool is considered stale and should be purged
		t := newTransaction()
		other.RLock()
		otherId := other.id
		other.RUnlock()
		t.purgeStalePool(otherId)
		if err := t.exec(); err != nil {
			return err
		}
	}
	return nil
}
Example 5
func RedisSub(key string) (chan interface{}, redis.PubSubConn, error) {
	mq := make(chan interface{}, Conf.RedisMQSize)
	c := redisPool.Get()
	defer c.Close()
	pc, err := redis.Dial(Conf.RedisNetwork, Conf.RedisAddr)
	if err != nil {
		Log.Printf("redis.Dial(\"%s\", \"%s\") failed (%s)", Conf.RedisNetwork, Conf.RedisAddr, err.Error())
		return nil, redis.PubSubConn{}, err
	}

	psc := redis.PubSubConn{Conn: pc}
	// check queue
	err = redisQueue(c, key, mq)
	if err != nil {
		Log.Printf("redisQueue failed (%s)", err.Error())
		pc.Close()
		return nil, redis.PubSubConn{}, err
	}
	// subscribe
	if err = psc.Subscribe(key); err != nil {
		Log.Printf("psc.Subscribe(\"%s\") failed (%s)", key, err.Error())
		pc.Close()
		return nil, redis.PubSubConn{}, err
	}
	if _, ok := psc.Receive().(redis.Subscription); !ok {
		Log.Printf("first reply after subscribe must be redis.Subscription")
		pc.Close()
		return nil, redis.PubSubConn{}, fmt.Errorf("first sub must init")
	}
	// double check for messages queued while subscribing
	err = redisQueue(c, key, mq)
	if err != nil {
		Log.Printf("redisQueue failed (%s)", err.Error())
		pc.Close()
		return nil, redis.PubSubConn{}, err
	}

	go func() {
		// DEBUG
		Log.Printf("redis routine start")
		// DEBUG
		defer Log.Printf("redis routine exit")
		defer psc.Close()
		for {
			switch n := psc.Receive().(type) {
			case redis.Message:
				mq <- string(n.Data)
			case redis.PMessage:
				mq <- string(n.Data)
			case redis.Subscription:
				// DEBUG
				Log.Printf("redis UnSubscrption")
				return
			case error:
				Log.Printf("psc.Receive() failed (%s)", n.Error())
				mq <- n
				return
			}
		}
	}()

	return mq, psc, nil
}
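A caller of RedisSub receives both payload strings and a terminal error in-band on the same channel, so a usage sketch (key name hypothetical) has to type-switch on what it reads:

// Hypothetical caller: drain mq until an error arrives; Unsubscribe
// triggers the redis.Subscription case that ends the inner goroutine.
func consume(key string) {
	mq, psc, err := RedisSub(key)
	if err != nil {
		return
	}
	defer psc.Unsubscribe(key)
	for m := range mq {
		switch v := m.(type) {
		case string:
			Log.Printf("message: %s", v)
		case error:
			Log.Printf("subscription ended (%s)", v.Error())
			return
		}
	}
}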
Example 6
func (c *Cache) updateEvents() {
	disconnected := false
connect:
	for {
		rconn, err := redis.DialTimeout("tcp", GetConfig().RedisAddress, redisConnectionTimeout, 0, 200*time.Millisecond)
		if err != nil {
			disconnected = true
			time.Sleep(50 * time.Millisecond)
			continue
		}
		if _, err = rconn.Do("PING"); err != nil {
			// Doing a PING after (re-connection) prevents cases where redis
			// is currently loading the dataset and is still not ready.
			// "LOADING Redis is loading the dataset in memory"
			rconn.Close()
			log.Info("Redis is loading the dataset in memory")
			time.Sleep(500 * time.Millisecond)
			continue
		}
		log.Info("Redis connected, subscribing pubsub.")
		psc := redis.PubSubConn{Conn: rconn}
		psc.Subscribe(FILE_UPDATE)
		psc.Subscribe(MIRROR_UPDATE)
		psc.Subscribe(MIRROR_FILE_UPDATE)
		if disconnected {
			// This is a way to keep the cache active while disconnected
			// from redis but still clear the cache (possibly outdated)
			// after a successful reconnection.
			disconnected = false
			c.Clear()
		}
		for {
			switch v := psc.Receive().(type) {
			case redis.Message:
				//if os.Getenv("DEBUG") != "" {
				//	fmt.Printf("Redis message on channel %s: message: %s\n", v.Channel, v.Data)
				//}
				c.handleMessage(v.Channel, v.Data)
			case redis.Subscription:
				if os.Getenv("DEBUG") != "" {
					log.Debug("Redis subscription event on channel %s: %s %d\n", v.Channel, v.Kind, v.Count)
				}
			case error:
				log.Error("UpdateEvents error: %s", v)
				psc.Close()
				rconn.Close()
				time.Sleep(50 * time.Millisecond)
				disconnected = true
				goto connect
			}
		}
	}
}
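The goto connect on a receive error jumps from the inner receive loop back to the dial loop. The same control flow can be written with a labeled continue; a minimal sketch of just the error path, using the same variables as updateEvents:

connect:
	for {
		// ... dial, PING, and subscribe exactly as above ...
		for {
			switch v := psc.Receive().(type) {
			case redis.Message:
				c.handleMessage(v.Channel, v.Data)
			case error:
				psc.Close()
				rconn.Close()
				time.Sleep(50 * time.Millisecond)
				disconnected = true
				continue connect // equivalent to the goto connect above
			}
		}
	}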
Example 7
func (r *RedisBackend) subscribeChannel(key string, msgs chan string) {
	var wg sync.WaitGroup

	redisPool := redis.Pool{
		MaxIdle:     1,
		IdleTimeout: 0,
		Dial: func() (redis.Conn, error) {
			return redis.DialTimeout("tcp", r.RedisHost, time.Second, 0, 0)
		},
		// test every connection for now
		TestOnBorrow: r.testOnBorrow,
	}

	for {
		conn := redisPool.Get()
		// no defer: this function loops and reconnects forever, so it never returns
		if err := conn.Err(); err != nil {
			conn.Close()
			log.Printf("ERROR: %v\n", err)
			time.Sleep(5 * time.Second)
			continue
		}

		wg.Add(1)
		psc := redis.PubSubConn{Conn: conn}
		go func() {
			defer wg.Done()
			for {
				switch n := psc.Receive().(type) {
				case redis.Message:
					msg := string(n.Data)
					msgs <- msg
				case error:
					psc.Close()
					log.Printf("ERROR: %v\n", n)
					return
				}
			}
		}()

		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := psc.Subscribe(key); err != nil {
				// Closing psc makes the receive goroutine observe the
				// error and exit, so wg.Wait() below can proceed.
				log.Printf("ERROR: %v\n", err)
				psc.Close()
				return
			}
			log.Printf("Monitoring for config changes on channel: %s\n", key)
		}()
		wg.Wait()
		conn.Close()
	}
}
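subscribeChannel reconnects forever and never returns, so a caller runs it in its own goroutine and drains msgs; a hypothetical sketch (channel name and handler invented):

// Hypothetical caller of subscribeChannel.
func watchConfig(r *RedisBackend) {
	msgs := make(chan string, 16)
	go r.subscribeChannel("config-change", msgs)
	for m := range msgs {
		log.Printf("config change: %s\n", m)
	}
}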
Example 8
func (p *Pubsub) updateEvents() {
	disconnected := false
connect:
	for {
		rconn := p.r.pool.Get()
		if _, err := rconn.Do("PING"); err != nil {
			disconnected = true
			rconn.Close()
			if RedisIsLoading(err) {
				// Doing a PING after (re-connection) prevents cases where redis
				// is currently loading the dataset and is still not ready.
				log.Warning("Redis is still loading the dataset in memory")
			}
			time.Sleep(500 * time.Millisecond)
			continue
		}
		log.Info("Subscribing pubsub")
		psc := redis.PubSubConn{Conn: rconn}

		psc.Subscribe(CLUSTER)
		psc.Subscribe(FILE_UPDATE)
		psc.Subscribe(MIRROR_UPDATE)
		psc.Subscribe(MIRROR_FILE_UPDATE)

		if disconnected {
			// This is a way to keep the cache active while disconnected
			// from redis but still clear the cache (possibly outdated)
			// after a successful reconnection.
			disconnected = false
			p.handleMessage(string(PUBSUB_RECONNECTED), nil)
		}
		for {
			switch v := psc.Receive().(type) {
			case redis.Message:
				//log.Debug("Redis message on channel %s: message: %s", v.Channel, v.Data)
				p.handleMessage(v.Channel, v.Data)
			case redis.Subscription:
				log.Debug("Redis subscription on channel %s: %s (%d)", v.Channel, v.Kind, v.Count)
			case error:
				log.Error("Pubsub disconnected: %s", v)
				psc.Close()
				rconn.Close()
				time.Sleep(50 * time.Millisecond)
				disconnected = true
				goto connect
			}
		}
	}
}
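RedisIsLoading is referenced but not defined in the excerpt. A plausible sketch: while reading the dataset into memory, Redis answers every command with an error reply starting with "LOADING", which redigo surfaces as a redis.Error:

// Plausible implementation of RedisIsLoading (an assumption, not
// necessarily the project's actual code); needs the strings package.
func RedisIsLoading(err error) bool {
	if e, ok := err.(redis.Error); ok {
		return strings.HasPrefix(string(e), "LOADING")
	}
	return false
}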
Example 9
func (s *Socket) ListenToRedis() {
	rConn := redis.PubSubConn{Conn: RedisPool.Get()}
	// Close inside a closure so the reconnected conn (rConn is
	// reassigned in the error case below) is the one closed; a plain
	// defer rConn.Close() would bind the original connection.
	defer func() { rConn.Close() }()

	rConn.Subscribe(s.redisChannels()...)

	var (
		message *Message
		err     error
	)

	for {
		switch event := rConn.Receive().(type) {
		case redis.Message:
			err = json.Unmarshal(event.Data, &message)

			if err != nil {
				s.logMsg("[SECURITY] Redis message isn't JSON: %s", event.Data)
				continue
			}

			switch message.Event {
			case "message":
				if message.IssuerID == s.ID {
					// Message was sent by this connection, ignore.
					continue
				}

				s.logMsg("Received message from redis on '%s'", message.Channel)
				websocket.JSON.Send(s.ws, &message)
			case "close":
				if message.IssuerID == s.ID {
					rConn.PUnsubscribe(s.redisChannels()...)
					break
				}
			}
		case error:
			rConn.Close()
			rConn = redis.PubSubConn{Conn: RedisPool.Get()}
			rConn.Subscribe(s.redisChannels()...)
		}
	}
}
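redisChannels is not shown; redigo's Subscribe takes variadic interface{} arguments, so the helper presumably returns the socket's channel names as such. A hypothetical version:

// Hypothetical helper; the real channel naming scheme is not shown.
func (s *Socket) redisChannels() []interface{} {
	return []interface{}{fmt.Sprintf("socket:%v", s.ID), "broadcast"}
}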
Example 10
func SubUserState() (<-chan []byte, error) {
	r := Redix[_SubLoginState].Get()

	psc := redis.PubSubConn{Conn: r}
	psc.Subscribe(SubKey)
	ch := make(chan []byte, 128)

	// Handle the first message after Subscribe (the subscription
	// confirmation) synchronously, to guarantee this function returns
	// only once SubKey is actually being listened on, so the caller can
	// go on to load the existing online-device list. This avoids a race
	// when Msgbus and Comet have both just started: in the window
	// between Comet deleting the previous stale device list and newly
	// logged-in devices being added, an asynchronously subscribing
	// Msgbus could load the old list first and begin listening only
	// afterwards, missing Comet's delete-old-list notification.
	data := psc.Receive()
	switch n := data.(type) {
	case redis.Subscription:
		if n.Count == 0 {
			glog.Fatalf("Subscription: %s %s %d, %v\n", n.Kind, n.Channel, n.Count, n)
		}
	case error:
		glog.Errorf("subscribe on LoginState error: %v\n", n)
		psc.Close() // don't leak the connection on the error path
		return nil, n
	}

	go func() {
		defer psc.Close()
		for {
			data := psc.Receive()
			switch n := data.(type) {
			case redis.Message:
				ch <- n.Data
				//if glog.V(1) {
				//	glog.Infof("Message: %s %s\n", n.Channel, n.Data)
				//}
			case error:
				glog.Errorf("error: %v\n", n)
				return
			}
		}
	}()
	return ch, nil
}
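The ordering the comment describes matters to callers: SubUserState returns only once the subscription is live, so the caller can load the existing device list afterwards without missing a delete notification. A hypothetical caller sketch:

// Hypothetical caller; loadExistingDevices and handleUserState are
// assumed helpers, not part of the example.
func startMsgbus() {
	ch, err := SubUserState()
	if err != nil {
		glog.Fatalf("SubUserState: %v", err)
	}
	loadExistingDevices() // safe: SubKey is already subscribed
	for data := range ch {
		handleUserState(data)
	}
}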
Example 11
// GetCerts gets a list of certs from the database, or another cluster member.
func (r *Redis) GetCerts() ([]core.CertBundle, error) {
	if database.CentralStore {
		return database.GetCerts()
	}

	conn := pool.Get()
	defer conn.Close()

	// get known members (other than me) to 'poll' for certs
	members, _ := redis.Strings(conn.Do("SMEMBERS", "members"))
	if len(members) == 0 {
		// should only happen on new cluster
		// assume i'm ok to be master so don't reset imported certs
		config.Log.Trace("[cluster] - Assuming OK to be master, using certs from my database...")
		return common.GetCerts()
	}
	for i := range members {
		if members[i] == self {
			// if i'm in the list of members, new requests should have failed while `waitForMembers`ing
			config.Log.Trace("[cluster] - Assuming I was in sync, using certs from my database...")
			return common.GetCerts()
		}
	}

	c, err := redis.DialURL(config.ClusterConnection, redis.DialConnectTimeout(15*time.Second), redis.DialPassword(config.ClusterToken))
	if err != nil {
		return nil, fmt.Errorf("Failed to reach redis for certs subscriber - %v", err)
	}
	defer c.Close()

	message := make(chan interface{})
	subconn := redis.PubSubConn{Conn: c}

	// subscribe to channel that certs will be published on
	if err := subconn.Subscribe("certs"); err != nil {
		return nil, fmt.Errorf("Failed to reach redis for certs subscriber - %v", err)
	}
	defer subconn.Close()

	// listen always
	go func() {
		for {
			message <- subconn.Receive()
		}
	}()

	// todo: maybe use ttl?
	// timeout is how long to wait for the listed members to come back online
	timeout := time.After(time.Duration(20) * time.Second)

	// loop attempts for timeout, allows last dead members to start back up
	for {
		select {
		case <-timeout:
			return nil, fmt.Errorf("Timed out waiting for certs from %v", strings.Join(members, ", "))
		default:
			// request certs from each member until successful
			for _, member := range members {
				// memberTimeout is how long to wait for a member to respond with list of certs
				memberTimeout := time.After(3 * time.Second)

				// ask a member for its certs
				config.Log.Trace("[cluster] - Attempting to request certs from %v...", member)
				_, err := conn.Do("PUBLISH", "portal", fmt.Sprintf("get-certs %s", member))
				if err != nil {
					return nil, err
				}

				// wait for member to respond
				for {
					select {
					case <-memberTimeout:
						config.Log.Debug("[cluster] - Timed out waiting for certs from %v", member)
						goto nextCertMember
					case msg := <-message:
						switch v := msg.(type) {
						case redis.Message:
							config.Log.Trace("[cluster] - Received message on 'certs' channel")
							var certs []core.CertBundle
							err = parseBody(v.Data, &certs)
							if err != nil {
								return nil, fmt.Errorf("Failed to marshal certs - %v", err.Error())
							}
							config.Log.Trace("[cluster] - Certs from cluster: %#v\n", certs)
							return certs, nil
						case error:
							return nil, fmt.Errorf("Subscriber failed to receive certs - %v", v.Error())
						}
					}
				}
			nextCertMember:
				continue // a label must be followed by a statement; try the next member
			}
		}
	}
}
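The responder half of the protocol is not shown: each member listens on the "portal" channel and, when it sees a get-certs request addressed to itself, publishes its cert list on "certs". A rough sketch (names and JSON encoding are assumptions; parseBody's actual wire format may differ):

// Hypothetical get-certs responder for the request published above.
func respondGetCerts(pub redis.Conn, sub redis.PubSubConn, self string) error {
	if err := sub.Subscribe("portal"); err != nil {
		return err
	}
	for {
		switch v := sub.Receive().(type) {
		case redis.Message:
			if string(v.Data) != "get-certs "+self {
				continue
			}
			certs, err := common.GetCerts()
			if err != nil {
				return err
			}
			body, err := json.Marshal(certs)
			if err != nil {
				return err
			}
			if _, err := pub.Do("PUBLISH", "certs", body); err != nil {
				return err
			}
		case error:
			return v
		}
	}
}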