Example no. 1
// NewTransaction wraps a pooled connection together with an empty list of
// Actions.
func NewTransaction(pool *redis.Pool) *Transaction {
	t := &Transaction{
		conn:    pool.Get(),
		Actions: make([]*Action, 0),
	}
	return t
}
Example no. 2
// Expired implements the `Expired` method defined on the Strategy interface.
// It iterates over the hash stored at the Heart's `location` key, looking for
// items that have expired. An item is considered expired iff its last update
// happened before the current time minus MaxAge.
func (s HashExpireyStrategy) Expired(location string,
	pool *redis.Pool) (expired []string, err error) {

	now := time.Now()

	cnx := pool.Get()
	defer cnx.Close()

	reply, err := redis.StringMap(cnx.Do("HGETALL", location))
	if err != nil {
		return
	}

	for id, tick := range reply {
		lastUpdate, err := time.Parse(DefaultTimeFormat, tick)

		if err != nil {
			continue
		} else if lastUpdate.Add(s.MaxAge).Before(now) {
			expired = append(expired, id)
		}
	}

	return
}
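The writer side of this strategy is not shown here; a minimal sketch of what it could look like, assuming the same `location` hash and `DefaultTimeFormat` constant (the helper name `beat` is hypothetical):
func beat(pool *redis.Pool, location, id string) error {
	cnx := pool.Get()
	defer cnx.Close()

	// Record the current time as the item's last-update tick; Expired above
	// parses this value back with the same DefaultTimeFormat.
	_, err := cnx.Do("HSET", location, id, time.Now().Format(DefaultTimeFormat))
	return err
}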
Example no. 3
// getProcedure returns the most recently registered procedure for the given
// endpoint, or nil if none exists or a Redis error occurs.
func getProcedure(pool *redis.Pool, endpoint URI) *registrationInfo {
	conn := pool.Get()
	defer conn.Close()

	proceduresKey := fmt.Sprintf("procedures:%s", endpoint)
	procedures, err := redis.Strings(conn.Do("ZREVRANGE", proceduresKey, 0, 1))
	if err != nil {
		out.Debug("Redis error on key %s: %v", proceduresKey, err)
		return nil
	}

	if len(procedures) == 0 {
		return nil
	}

	info := registrationInfo{}

	reply, err := redis.Values(conn.Do("HGETALL", procedures[0]))
	if err != nil {
		out.Debug("Redis error on key %s: %v", procedures[0], err)
		return nil
	}

	err = redis.ScanStruct(reply, &info)
	if err != nil {
		out.Debug("Redis error on key %s: %v", procedures[0], err)
		return nil
	}

	return &info
}
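The writer that getProcedure assumes is not included; a sketch of what it might look like, using redigo's `Args.AddFlat` to flatten the struct that `ScanStruct` reads back (the name `putProcedure` and the time-based score are assumptions):
func putProcedure(pool *redis.Pool, endpoint URI, key string, info *registrationInfo) error {
	conn := pool.Get()
	defer conn.Close()

	// Flatten the struct into field/value pairs for HMSET; ScanStruct in
	// getProcedure performs the inverse mapping.
	if _, err := conn.Do("HMSET", redis.Args{}.Add(key).AddFlat(info)...); err != nil {
		return err
	}

	// Index the hash key under the endpoint, scored so that ZREVRANGE
	// returns the most recent registration first.
	proceduresKey := fmt.Sprintf("procedures:%s", endpoint)
	_, err := conn.Do("ZADD", proceduresKey, time.Now().Unix(), key)
	return err
}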
Example no. 4
// Build fetches the transaction's TxIns and TxOuts from Redis.
func (tx *Tx) Build(rpool *redis.Pool) (err error) {
	c := rpool.Get()
	defer c.Close()
	tx.TxIns = []*TxIn{}
	tx.TxOuts = []*TxOut{}
	txinskeys := []interface{}{}
	for i := range iter.N(int(tx.TxInCnt)) {
		txinskeys = append(txinskeys, fmt.Sprintf("txi:%v:%v", tx.Hash, i))
	}
	txinsjson, err := redis.Strings(c.Do("MGET", txinskeys...))
	if err != nil {
		return
	}
	for _, txinjson := range txinsjson {
		ctxi := new(TxIn)
		if err = json.Unmarshal([]byte(txinjson), ctxi); err != nil {
			return
		}
		tx.TxIns = append(tx.TxIns, ctxi)
	}
	txoutskeys := []interface{}{}
	txoutsspentkeys := []interface{}{}
	for i := range iter.N(int(tx.TxOutCnt)) {
		txoutskeys = append(txoutskeys, fmt.Sprintf("txo:%v:%v", tx.Hash, i))
		txoutsspentkeys = append(txoutsspentkeys, fmt.Sprintf("txo:%v:%v:spent", tx.Hash, i))
	}
	txoutsjson, err := redis.Strings(c.Do("MGET", txoutskeys...))
	if err != nil {
		return
	}
	txoutsspentjson, err := redis.Strings(c.Do("MGET", txoutsspentkeys...))
	if err != nil {
		return
	}
	for txoindex, txoutjson := range txoutsjson {
		ctxo := new(TxOut)
		if err = json.Unmarshal([]byte(txoutjson), ctxo); err != nil {
			return
		}
		if txoutsspentjson[txoindex] != "" {
			cspent := new(TxoSpent)
			if err = json.Unmarshal([]byte(txoutsspentjson[txoindex]), cspent); err != nil {
				return
			}
			ctxo.Spent = cspent
		}
		tx.TxOuts = append(tx.TxOuts, ctxo)
	}
	return
}
Example no. 5
// Ping verifies connectivity by issuing a PING on a pooled connection.
func (this *testRedis) Ping(pool *redis.Pool) error {
	connection := pool.Get()
	defer connection.Close()

	_, err := connection.Do("PING")
	return err
}
Example no. 6
func Deq(pool *redis.Pool, latch *utee.Throttle, uid interface{}) ([]byte, error) {
	c := pool.Get()
	defer c.Close()
	defer latch.Release()
	name := qname(uid)
	for {
		k, err := redis.String(c.Do("LPOP", name))
		if err != nil && err != redis.ErrNil {
			return nil, err
		}
		if len(k) == 0 {
			// Queue is empty.
			break
		}
		b, err := redis.Bytes(c.Do("GET", k))
		if err != nil && err != redis.ErrNil {
			return nil, err
		}
		if b != nil {
			// Payload found: delete its key and hand the bytes back.
			c.Do("DEL", k)
			i++ // i: package-level counter of successful dequeues (assumed)
			if i%10000 == 0 {
				log.Println("@success:", i)
			}
			return b, nil
		}
		// Key popped but its payload is gone (e.g. expired); try the next one.
	}
	return nil, nil
}
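The matching enqueue is not part of this example; a sketch under the assumption that payloads live under their own keys which are then pushed onto the per-uid list (`pkey` is a hypothetical key generator, `qname` the same helper Deq uses):
func Enq(pool *redis.Pool, uid interface{}, b []byte) error {
	c := pool.Get()
	defer c.Close()

	// Store the payload under its own key...
	k := pkey(uid)
	if _, err := c.Do("SET", k, b); err != nil {
		return err
	}
	// ...then push that key onto the list that Deq pops from.
	_, err := c.Do("RPUSH", qname(uid), k)
	return err
}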
Example no. 7
// AddTXPayloadToQueue adds the given TXPayload to the queue.
func AddTXPayloadToQueue(p *redis.Pool, payload models.TXPayload) error {
	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)
	if err := enc.Encode(payload); err != nil {
		return fmt.Errorf("encode tx-payload for node %s error: %s", payload.DevEUI, err)
	}

	c := p.Get()
	defer c.Close()

	exp := int64(common.NodeTXPayloadQueueTTL) / int64(time.Millisecond)
	key := fmt.Sprintf(nodeTXPayloadQueueTempl, payload.DevEUI)

	c.Send("MULTI")
	c.Send("LPUSH", key, buf.Bytes())
	c.Send("PEXPIRE", key, exp)
	_, err := c.Do("EXEC")

	if err != nil {
		return fmt.Errorf("add tx-payload to queue for node %s error: %s", payload.DevEUI, err)
	}

	log.WithFields(log.Fields{
		"dev_eui":   payload.DevEUI,
		"reference": payload.Reference,
	}).Info("tx-payload added to queue")
	return nil
}
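The consumer side of this queue is not shown; a sketch of how a queued payload might be popped and decoded, assuming the same key template and that `DevEUI` is a `lorawan.EUI64` (the function name is hypothetical):
func getTXPayloadFromQueue(p *redis.Pool, devEUI lorawan.EUI64) (models.TXPayload, error) {
	var payload models.TXPayload

	c := p.Get()
	defer c.Close()

	// LPUSH by the producer plus RPOP here gives FIFO ordering.
	key := fmt.Sprintf(nodeTXPayloadQueueTempl, devEUI)
	b, err := redis.Bytes(c.Do("RPOP", key))
	if err != nil {
		return payload, fmt.Errorf("pop tx-payload from queue for node %s error: %s", devEUI, err)
	}

	err = gob.NewDecoder(bytes.NewReader(b)).Decode(&payload)
	return payload, err
}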
Example no. 8
func ReclaimSessionID(pool *redis.Pool, sessionID ID, authid string, domain string) error {
	conn := pool.Get()
	defer conn.Close()

	sessionKey := fmt.Sprintf("session:%x", sessionID)

	// First, try to claim the session ID.  This tells us if it exists or
	// safely reserves it if it does not.
	reply, err := redis.Int(conn.Do("HSETNX", sessionKey, "domain", domain))
	if err != nil {
		out.Debug("Redis error on key %s: %v", sessionKey, err)
		return err
	} else if reply == 1 {
		// It did not exist before; the caller now owns it.
		return nil
	}

	prevDomain, err := redis.String(conn.Do("HGET", sessionKey, "domain"))
	if err != nil {
		out.Debug("Redis error on key %s: %v", sessionKey, err)
		return err
	}

	// Ensure that the new agent owns the claimed session ID.
	if subdomain(authid, prevDomain) {
		return nil
	}
	return fmt.Errorf("Permission denied: %s cannot claim %s", authid, sessionKey)
}
Example no. 9
// MustFlushRedis flushes the Redis storage.
func MustFlushRedis(p *redis.Pool) {
	c := p.Get()
	defer c.Close()
	if _, err := c.Do("FLUSHALL"); err != nil {
		log.Fatal(err)
	}
}
Example no. 10
// ReadMACPayloadTXQueue reads the full MACPayload tx queue for the given
// device address.
func ReadMACPayloadTXQueue(p *redis.Pool, devAddr lorawan.DevAddr) ([]models.MACPayload, error) {
	var out []models.MACPayload

	c := p.Get()
	defer c.Close()

	key := fmt.Sprintf(nodeSessionMACTXQueueTempl, devAddr)
	values, err := redis.Values(c.Do("LRANGE", key, 0, -1))
	if err != nil {
		return nil, fmt.Errorf("get mac-payload from tx queue for devaddr %s error: %s", devAddr, err)
	}

	for _, value := range values {
		b, ok := value.([]byte)
		if !ok {
			return nil, fmt.Errorf("expected []byte type, got %T", value)
		}

		var pl models.MACPayload
		err = gob.NewDecoder(bytes.NewReader(b)).Decode(&pl)
		if err != nil {
			return nil, fmt.Errorf("decode mac-payload for devaddr %s error: %s", devAddr, err)
		}
		out = append(out, pl)
	}
	return out, nil
}
Example no. 11
func deliver(pool *redis.Pool, key string, data map[string]string) (err error) {
	hkey, err := RandomKey(GOOSE_REDIS_REQ_PREFIX, 16)
	if err != nil {
		return
	}

	conn := pool.Get()
	defer conn.Close()

	// Record the request/match.
	for field, val := range data {
		if err = conn.Send("HSET", hkey, field, val); err != nil {
			return
		}
	}

	// Notify any processes blocking on the associated list.
	if err = conn.Send("LPUSH", key, hkey); err != nil {
		return
	}

	// Flush the pipeline.
	if err = conn.Flush(); err != nil {
		return
	}

	// Read all of the replies, but just drop them on the ground.
	for i := 0; i < len(data)+1; i++ {
		if _, err = conn.Receive(); err != nil {
			return
		}
	}

	return
}
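A process blocking on the associated list, as the comment above puts it, would look roughly like this (`receive` is a hypothetical counterpart, not part of the source):
func receive(pool *redis.Pool, key string, timeoutSecs int) (map[string]string, error) {
	conn := pool.Get()
	defer conn.Close()

	// BRPOP blocks until an hkey is pushed; the reply is [listName, value].
	reply, err := redis.Strings(conn.Do("BRPOP", key, timeoutSecs))
	if err != nil {
		return nil, err
	}

	// Read back the request/match hash that deliver recorded.
	return redis.StringMap(conn.Do("HGETALL", reply[1]))
}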
Example no. 12
// NewRDBpool creates a Redis connection pool for the given address and panics
// if an initial connection cannot be established.
func NewRDBpool(address string) *RDBpool {
	pool := redis.Pool{
		MaxActive: 0,
		MaxIdle:   3,
		Dial: func() (redis.Conn, error) {
			c, err := redis.DialTimeout(
				"tcp",
				address,
				time.Second, // connect timeout
				time.Second, // read timeout
				time.Second, // write timeout
			)
			if err != nil {
				return nil, err
			}

			return c, err
		},
	}

	conn := pool.Get()
	defer conn.Close()
	if conn.Err() != nil {
		panic(fmt.Sprintf("Cannot connect to Redis at %s", address))
	}

	return &RDBpool{pool: pool}
}
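For comparison, redigo's Pool exposes a few more fields that are commonly set; a sketch of a fuller configuration (the values are illustrative, not taken from the example above):
func newPool(address string) *redis.Pool {
	return &redis.Pool{
		MaxIdle:     3,
		MaxActive:   10,
		IdleTimeout: 240 * time.Second,
		Dial: func() (redis.Conn, error) {
			return redis.Dial("tcp", address)
		},
		// Health-check idle connections before handing them out, using the
		// same PING probe as instanceAlive in Example no. 25.
		TestOnBorrow: func(c redis.Conn, t time.Time) error {
			if time.Since(t) < time.Minute {
				return nil
			}
			_, err := c.Do("PING")
			return err
		},
	}
}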
Example no. 13
func BenchmarkRedisPool(b *testing.B) {
	b.StopTimer()
	p := redis.Pool{
		Dial: func() (redis.Conn, error) {
			//			c, err := redis.Dial("tcp", cfg.RedisAddr())
			c, err := redis.Dial("tcp", ":6379")
			if err != nil {
				return nil, err
			}
			// select db 11 for the benchmark
			c.Do("SELECT", 11)
			return c, nil
		},

		MaxIdle:   30,
		MaxActive: 30}
	c := p.Get()
	if err := c.Err(); err != nil {
		b.Fatal(err)
	}
	c.Close()
	defer p.Close()
	//	c = p.Get()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		c = p.Get()
		if _, err := c.Do("PING"); err != nil {
			b.Fatal(err)
		}
		c.Close()
	}
	b.StopTimer()
}
Example no. 14
// DeleteMACPayloadFromTXQueue deletes the given MACPayload from the tx queue
// of the given device address.
func DeleteMACPayloadFromTXQueue(p *redis.Pool, devAddr lorawan.DevAddr, pl models.MACPayload) error {
	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)
	if err := enc.Encode(pl); err != nil {
		return fmt.Errorf("gob encode tx mac-payload for node %s error: %s", pl.DevEUI, err)
	}

	c := p.Get()
	defer c.Close()

	key := fmt.Sprintf(nodeSessionMACTXQueueTempl, devAddr)
	val, err := redis.Int(c.Do("LREM", key, 0, buf.Bytes()))
	if err != nil {
		return fmt.Errorf("delete mac-payload from tx queue for devaddr %s error: %s", devAddr, err)
	}

	if val == 0 {
		return fmt.Errorf("mac-payload with reference '%s' is not in tx queue for devaddr %s", pl.Reference, devAddr)
	}

	log.WithFields(log.Fields{
		"dev_eui":   pl.DevEUI,
		"dev_addr":  devAddr,
		"reference": pl.Reference,
	}).Info("mac-payload removed from tx queue")
	return nil
}
Example no. 15
func instanceIsMaster(pool *redis.Pool, port string) {
	c := pool.Get()
	defer c.Close()

	for {
		master, err := redis.StringMap(c.Do("CONFIG", "GET", "slaveof"))
		if err != nil {
			// Retry the connection to Redis until it is back. Close the dead
			// connection explicitly: a defer here would accumulate on every
			// iteration and never run inside this endless loop.
			c.Close()
			time.Sleep(time.Second * time.Duration(connectionLostInterval))
			c = pool.Get()
			continue
		}
		for _, value := range master {
			if value != "" {
				// Instance is now a slave, notify.
				if fetchPossible[port] {
					c.Do("PUBLISH", "redis-scouter", "stop")
					fetchPossible[port] = false
					log.Printf("[instance-check-%s] became a slave", port)
				}
			} else {
				// Re-enable metrics.
				if !fetchPossible[port] {
					fetchPossible[port] = true
					log.Printf("[instance-check-%s] became a master", port)
				}
			}
		}
		time.Sleep(time.Second * time.Duration(masterCheckInterval))
	}
}
Example no. 16
func ProcessNewBlock(conf *Config, rpool *redis.Pool, spool *redis.Pool) {
	log.Println("ProcessNewBlock startup")
	conn := rpool.Get()
	defer conn.Close()
	psc := redis.PubSubConn{Conn: conn}
	psc.Subscribe("btcplex:blocknotify")
	for {
		switch v := psc.Receive().(type) {
		case redis.Message:
			hash := string(v.Data)
			log.Printf("Processing new block: %v\n", hash)
			c := rpool.Get()
			newblock, err := SaveBlockFromRPC(conf, spool, hash)
			if err != nil {
				log.Printf("Error processing new block: %v\n", err)
			} else {
				// Once the block is processed, publish it as btcplex's own blocknotify
				c.Do("PUBLISH", "btcplex:blocknotify2", hash)
				newblockjson, _ := json.Marshal(newblock)
				c.Do("PUBLISH", "btcplex:newblock", string(newblockjson))
			}
			c.Close()
		case error:
			// A receive error (e.g. connection closed) would otherwise make
			// this loop spin hot; log it and stop.
			log.Printf("PubSub receive error: %v\n", v)
			return
		}
	}
}
Example no. 17
// AddMACPayloadToTXQueue adds the given payload to the queue of MAC commands
// to send to the node. Note that the queue is bound to the node-session, since
// all mac operations are reset after a re-join of the node.
func AddMACPayloadToTXQueue(p *redis.Pool, pl models.MACPayload) error {
	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)
	if err := enc.Encode(pl); err != nil {
		return fmt.Errorf("gob encode tx mac-payload for node %s error: %s", pl.DevEUI, err)
	}

	c := p.Get()
	defer c.Close()

	ns, err := GetNodeSessionByDevEUI(p, pl.DevEUI)
	if err != nil {
		return fmt.Errorf("get node-session for node %s error: %s", pl.DevEUI, err)
	}

	exp := int64(common.NodeSessionTTL) / int64(time.Millisecond)
	key := fmt.Sprintf(nodeSessionMACTXQueueTempl, ns.DevAddr)

	c.Send("MULTI")
	c.Send("RPUSH", key, buf.Bytes())
	c.Send("PEXPIRE", key, exp)
	_, err = c.Do("EXEC")

	if err != nil {
		return fmt.Errorf("add mac-payload to tx queue for node %s error: %s", pl.DevEUI, err)
	}
	log.WithFields(log.Fields{
		"dev_eui":   pl.DevEUI,
		"dev_addr":  ns.DevAddr,
		"reference": pl.Reference,
	}).Info("mac-payload added to tx queue")
	return nil
}
Example no. 18
func StoreSessionDetails(pool *redis.Pool, session *Session, details map[string]interface{}) {
	conn := pool.Get()
	defer conn.Close()

	sessionKey := fmt.Sprintf("session:%x", session.Id)

	if !session.canFreeze {
		// Track sessions that cannot be frozen (ordinary ones) so that the
		// node can clear them out when it restarts.
		_, err := conn.Do("SADD", "transient_sessionids", int64(session.Id))
		if err != nil {
			out.Debug("Redis error on key transient_sessionids: %v", err)
		}
	}

	guardian, ok := details["guardianDomain"].(string)
	if ok && guardian != "" {
		endpoint := guardian + "/thaw"
		_, err := conn.Do("HSET", sessionKey, "thawEndpoint", endpoint)
		if err != nil {
			out.Debug("Redis error on key %s: %v", sessionKey, err)
		}
	}

	id, ok := details["guardianID"].(string)
	if ok && id != "" {
		_, err := conn.Do("HSET", sessionKey, "thawID", id)
		if err != nil {
			out.Debug("Redis error on key %s: %v", sessionKey, err)
		}
	}
}
Example no. 19
func updateServerLoad(pool *redis.Pool, serverID string) {
	tock := time.Duration((common.SERVER_UPDATE_LOAD_SECONDS * 0.95) * float64(time.Second))
	c := time.Tick(tock) //want it to run SLIGHTLY before the key expires
	conn := pool.Get()

	for range c { //inf for loop
		fmt.Println("Setting server load...")

		numberOfDevices, err := redis.Float64(conn.Do("LLEN", "ASMS::server:"+serverID+"::deviceIDs:"))
		if err != nil {
			panic(err) //no issue if the key is missing: redis returns 0 for a nonexistent list
		} //end if

		currentLoad := numberOfDevices * common.MAX_DEVICES_LOAD_DENOMINATOR // % out of 100

		//current load; SET clears any existing TTL, so PEXPIRE must come after it
		if _, err := conn.Do("SET", "ASMS::server:"+serverID+"::load:", currentLoad); err != nil {
			panic(err)
		} //end if

		if _, err := conn.Do("PEXPIRE", "ASMS::server:"+serverID+"::load:", common.SERVER_UPDATE_LOAD_SECONDS*1000); err != nil {
			panic(err)
		} //end if

		fmt.Println("Done setting server load...")
	} //end for loop

} //end UpdateServerLoad
Example no. 20
func getHash(pool *redis.Pool, id string) map[string]int {
	c := pool.Get()
	defer c.Close()
	m, err := redis.IntMap(c.Do("HGETALL", id))
	utee.Chk(err)
	return m
}
Example no. 21
//when a device requests something from the tracker this is what responds
func handleReply(pool *redis.Pool, zmpREP *zmq.Socket) {
	for { //endless loop
		req, err := zmpREP.Recv()
		if err != nil {
			fmt.Println(err)
		} //end if

		var rMsg string //the return message

		if len(req) == 1 { //the message is only 1 part long.
			msg := string(req[0])
			if msg == "serverLoads" { //if they sent the right message to the tracker
				conn := pool.Get() //get a connection from the pool

				//get the list of server loads
				//// Start getting the server loads from redis
				listLength, err := redis.Int(conn.Do("LLEN", "ASMS::connected::serverID:"))
				if err != nil {
					fmt.Println(err)
				}

				sIDs, err := redis.Strings(conn.Do("LRANGE", "ASMS::connected::serverID:", 0, listLength))
				if err != nil {
					fmt.Println(err)
				}

				serverLoads := []*common.Server{}

				for i := range sIDs {
					load, err := redis.Float64(conn.Do("GET", "ASMS::server:"+sIDs[i]+"::load:"))
					if err != nil {
						fmt.Println(err)
					}
					serverLoads = append(serverLoads, &common.Server{ServerID: sIDs[i], ServerLoad: common.Load(load)}) //add the server to the list, assert the load as common.Load
				}
				sort.Sort(common.ByLoad{serverLoads}) //sort the loads of the servers

				//translate the serverID to the servers IP
				serverIP, err := redis.String(conn.Do("GET", "ASMS::serverID:"+serverLoads[0].ServerID+"::IP:"))
				if err != nil {
					fmt.Println(err)
				}
				rMsg = serverIP //set rMsg to the IP of the server with the smallest load
				conn.Close()    //close explicitly; a defer inside this endless loop would never run

			} else {
				rMsg = "0" // we will program this in as being unknown message recieved
			} //end if

			err = zmpREP.Send([][]byte{ //send the response back
				[]byte(rMsg),
			})
			if err != nil { //check for an error
				panic(err)
			} //end if
		} //end if

	} //end for
} //end handleReply
Example no. 22
// Returns an iterator.  Be sure to call Close when done with the iterator!
func GetSubscriptions(pool *redis.Pool, endpoint URI) *subscriptionIterator {
	iter := &subscriptionIterator{
		conn:   pool.Get(),
		key:    fmt.Sprintf("subscribers:%s", endpoint),
		cursor: -1,
	}
	return iter
}
Example no. 23
func updateDevice(serverID string, deviceID string, pool *redis.Pool) {
	conn := pool.Get()
	defer conn.Close()

	if _, err := conn.Do("PUBLISH", "ASMS::UpdateDevice::", deviceID+"::"+serverID); err != nil {
		panic(err)
	} //end if

} //end updateDevice
Example no. 24
// collectAndCallOnce collects the packet, sleeps the configured duration and
// calls the callback only once with a slice of packets, sorted by signal
// strength (strongest at index 0). This method exists since multiple gateways
// are able to receive the same packet, but the packet needs to be processed
// only once.
// It is important to validate the packet before calling collectAndCallOnce
// (since the MIC is part of the storage key, make sure it is valid).
// It is safe to collect the same packet received by the same gateway twice.
// Since the underlying storage type is a set, the result will always be a
// unique set per gateway MAC and packet MIC.
func collectAndCallOnce(p *redis.Pool, rxPacket models.RXPacket, callback func(packets RXPackets) error) error {
	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)
	if err := enc.Encode(rxPacket); err != nil {
		return fmt.Errorf("encode rx packet error: %s", err)
	}
	c := p.Get()
	defer c.Close()

	// store the packet in a set with CollectAndCallOnceWait expiration
	// in case the packet is received by multiple gateways, the set will contain
	// each packet.
	key := "collect_" + hex.EncodeToString(rxPacket.PHYPayload.MIC[:])
	c.Send("MULTI")
	c.Send("SADD", key, buf.Bytes())
	c.Send("PEXPIRE", key, int64(CollectAndCallOnceWait*2)/int64(time.Millisecond))
	_, err := c.Do("EXEC")
	if err != nil {
		return fmt.Errorf("add rx packet to collect set error: %s", err)
	}

	// acquire a lock on processing this packet
	_, err = redis.String((c.Do("SET", key+"_lock", "lock", "PX", int64(CollectAndCallOnceWait*2)/int64(time.Millisecond), "NX")))
	if err != nil {
		if err == redis.ErrNil {
			// the packet processing is already locked by another process
			// so there is nothing to do anymore :-)
			return nil
		}
		return fmt.Errorf("acquire lock error: %s", err)
	}

	// wait the configured amount of time, more packets might be received
	// from other gateways
	time.Sleep(CollectAndCallOnceWait)

	// collect all packets from the set
	rxPackets := make(RXPackets, 0)
	payloads, err := redis.ByteSlices(c.Do("SMEMBERS", key))
	if err != nil {
		return fmt.Errorf("get collect set members error: %s", err)
	}
	if len(payloads) == 0 {
		return errors.New("zero items in collect set")
	}

	for _, b := range payloads {
		var packet models.RXPacket
		if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&packet); err != nil {
			return fmt.Errorf("decode rx packet error: %s", err)
		}
		rxPackets = append(rxPackets, packet)
	}

	sort.Sort(rxPackets)
	return callback(rxPackets)
}
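The `SET key value PX ttl NX` call above is the standard single-instance Redis lock acquisition. Factored into a helper, the pattern reads like this (a sketch, not part of the source):
func acquireLock(c redis.Conn, key string, ttl time.Duration) (bool, error) {
	// SET ... NX returns a nil reply when the key already exists, which
	// redis.String surfaces as redis.ErrNil.
	_, err := redis.String(c.Do("SET", key, "lock", "PX", int64(ttl/time.Millisecond), "NX"))
	if err == redis.ErrNil {
		return false, nil // another process holds the lock
	}
	if err != nil {
		return false, err
	}
	return true, nil
}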
Example no. 25
func instanceAlive(pool *redis.Pool) bool {
	c := pool.Get()
	defer c.Close()
	_, err := c.Do("PING")
	if err != nil {
		return false
	}
	return true
}
Example no. 26
// Fetch unconfirmed tx from Redis
func GetUnconfirmedTx(pool *redis.Pool, hash string) (tx *Tx, err error) {
	c := pool.Get()
	defer c.Close()
	txkey := fmt.Sprintf("btcplex:utx:%v", hash)
	tx = new(Tx)
	txjson, err := redis.String(c.Do("GET", txkey))
	if err != nil {
		return
	}
	err = json.Unmarshal([]byte(txjson), tx)
	return
}
Example no. 27
func (entity *Entity) Retrieve(pool *redis.Pool) error {
	db := pool.Get()
	defer db.Close()
	url, err := redis.String(db.Do("GET", entity.Token))
	if err == nil {
		entity.Url = url
	}
	return err
}
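The write path for Entity is not shown; the obvious counterpart, sketched under the assumption that Token and Url are the only persisted fields:
func (entity *Entity) Store(pool *redis.Pool) error {
	db := pool.Get()
	defer db.Close()

	// Persist the Token -> Url mapping that Retrieve reads back.
	_, err := db.Do("SET", entity.Token, entity.Url)
	return err
}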
Example no. 28
// DB returns a martini.Handler that maps a pooled Redis connection into the
// request context and closes it once the downstream handlers have run.
func DB(pool *redis.Pool) martini.Handler {
	return func(c martini.Context) {
		conn := pool.Get()
		c.Map(conn)
		c.Next()
		conn.Close()
	}
}
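Usage would look roughly like this, relying on martini's reflection-based injection to pass the mapped connection to route handlers (a sketch; the route and address are illustrative):
func main() {
	pool := &redis.Pool{
		MaxIdle: 3,
		Dial:    func() (redis.Conn, error) { return redis.Dial("tcp", ":6379") },
	}
	m := martini.Classic()
	m.Use(DB(pool))
	// The conn parameter is injected from the value mapped by DB.
	m.Get("/ping", func(conn redis.Conn) string {
		reply, err := redis.String(conn.Do("PING"))
		if err != nil {
			return err.Error()
		}
		return reply
	})
	m.Run()
}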
Example no. 29
// Fetch a transaction by hash
func GetTx(rpool *redis.Pool, hash string) (tx *Tx, err error) {
	c := rpool.Get()
	defer c.Close()
	tx = new(Tx)
	txjson, err := redis.String(c.Do("GET", fmt.Sprintf("tx:%v", hash)))
	if err != nil {
		return
	}
	if err = json.Unmarshal([]byte(txjson), tx); err != nil {
		return
	}
	err = tx.Build(rpool)
	return
}
Example no. 30
func (promApi *PromApi) seriesNodeBootTime(redisPool *redis.Pool) {
	log.Println("info: seriesNodeBootTime begin.")
	seriesResp := &SeriesResp{}
	// get a connection from the redis pool.
	redisClient := redisPool.Get()
	defer redisClient.Close()
	end := time.Now().Unix()
	start := end - 900
	startStr := time.Unix(start, 0).UTC().Format(time.RFC3339)
	endStr := time.Unix(end, 0).UTC().Format(time.RFC3339)
	containerStart := end - 21600
	containerStartStr := time.Unix(containerStart, 0).UTC().Format(time.RFC3339)
	// request all prometheus container metrics.
	redisClient.Do("SET", "containerLastSeen", promApi.containerLastSeenRequest(containerStartStr, endStr, "600", consts.ContainerLastSeenMetric))
	redisClient.Do("SET", "containerMemoryUsageBytes", promApi.containerMemoryUsageBytesRequest(containerStartStr, endStr, "600", consts.ContainerMemoryUsageBytesMetric))
	redisClient.Do("SET", "totalRancherServices", promApi.totalRancherServicesRequest(containerStartStr, endStr, "600", consts.TotalRancherServicesMetric))
	// request all prometheus host metrics.
	requestEndpoint := "http://" + promApi.ApiEndpoint + "/api/v1/query?query=node_boot_time"
	if res, err := http.Get(requestEndpoint); err != nil {
		log.Println("error: PromApi seriesNodeBootTime exec http request error: ", err)
	} else {
		result, _ := ioutil.ReadAll(res.Body)
		defer res.Body.Close()
		if unmarshalError := json.Unmarshal(result, seriesResp); unmarshalError != nil {
			log.Println("error: PromApi seriesNodeBootTime unmarshal error: ", unmarshalError)
			return
		}
		seriesResp.RequestType = consts.NodeBootMetric
		if respBytes, marshalErr := json.Marshal(seriesResp); marshalErr != nil {
			log.Println("error: PromApi seriesNodeBootTime marshal error:", marshalErr)
			return
		} else {
			redisClient.Do("SET", "seriesNodeBootTime", respBytes)
			dataLen := len(seriesResp.Data.Result)
			if dataLen > 0 {
				// save all instance.
				promApi.instance = make([]string, dataLen)
				for index, data := range seriesResp.Data.Result {
					promApi.instance[index] = data.Metric.Instance
				}
			}
			log.Println("info: seriesNodeBootTime end.")
			redisClient.Do("SET", "nodeCpu", promApi.nodeCpuRequest(startStr, endStr, "10", consts.CpuMetric))
			redisClient.Do("SET", "nodeProcessRunning", promApi.nodeProcessRunningRequest(startStr, endStr, "2", consts.ProcessRunningMetric))
			redisClient.Do("SET", "nodeProcessBlocked", promApi.nodeProcessBlockedRequest(startStr, endStr, "2", consts.ProcessBlockedMetric))
			redisClient.Do("SET", "nodeMemoryUsage", promApi.nodeMemoryUsageRequest(startStr, endStr, "2", consts.MemoryMetric))
			redisClient.Do("SET", "nodeMemoryCommittedAs", promApi.nodeMemoryCommittedAsRequest(startStr, endStr, "2", consts.MemoryCommittedMetric))
			redisClient.Do("SET", "nodeDiskReadTimeMs", promApi.nodeDiskReadTimeMsRequest(startStr, endStr, "2", consts.DiskReadMetric))
			redisClient.Do("SET", "nodeFileSystemUsage", promApi.nodeFileSystemUsageRequest(startStr, endStr, "2", consts.DiskMetric))
			redisClient.Do("SET", "nodeNetworkReceive", promApi.nodeNetworkReceiveRequest(startStr, endStr, "2", consts.NetworkReceiveMetric))
			redisClient.Do("SET", "nodeNetworkTransmit", promApi.nodeNetworkTransmitRequest(startStr, endStr, "2", consts.NetworkTransmitMetric))
			redisClient.Do("SET", "rancherHosts", promApi.rancherHostsRequest(startStr, endStr, "2", consts.RancherHostsMetric))
			redisClient.Do("SET", "combinMetric", promApi.combinMetricRequest(startStr, endStr, "2", consts.CombinMetric))
		}
	}
}