Example #1
// AddSlaveToPod is used for adding a new slave to an existing pod.
// TODO: the actual implementation should live in the actions package so the
// UI's handlers package can call it as well; for now it is duplicated there.
func (r *RPC) AddSlaveToPod(nsr rsclient.AddSlaveToPodRequest, resp *bool) error {
	pod, err := r.constellation.GetPod(nsr.Pod)
	if err != nil {
		return errors.New("Pod not found")
	}
	name := fmt.Sprintf("%s:%d", nsr.SlaveIP, nsr.SlavePort)
	new_slave, err := client.DialWithConfig(&client.DialConfig{Address: name, Password: nsr.SlaveAuth})
	if err != nil {
		log.Print("ERR: Dialing slave -", err)
		return errors.New("Server was unable to connect to slave")
	}
	// Close the pooled connection only after a successful dial; deferring
	// before the error check would dereference a nil client on failure.
	defer new_slave.ClosePool()
	err = new_slave.SlaveOf(pod.Info.IP, fmt.Sprintf("%d", pod.Info.Port))
	if err != nil {
		log.Printf("Err: %v", err)
		if strings.Contains(err.Error(), "Already connected to specified master") {
			return errors.New("Already connected to specified master")
		}
	}

	new_slave.ConfigSet("masterauth", pod.AuthToken)
	new_slave.ConfigSet("requirepass", pod.AuthToken)
	pod.Master.LastUpdateValid = false
	r.constellation.PodMap[pod.Name] = pod
	*resp = true
	return err
}
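A minimal caller sketch for the RPC above, assuming the server registers *RPC with Go's standard net/rpc package over TCP; the address, import path, and request values here are illustrative, not from the original:

package main

import (
	"log"
	"net/rpc"

	"github.com/therealbill/redskull/rsclient" // import path is an assumption
)

func main() {
	conn, err := rpc.Dial("tcp", "127.0.0.1:8001") // hypothetical server address
	if err != nil {
		log.Fatal("dialing RPC server: ", err)
	}
	defer conn.Close()
	var added bool
	req := rsclient.AddSlaveToPodRequest{Pod: "pod1", SlaveIP: "10.0.0.5", SlavePort: 6379, SlaveAuth: "secret"}
	if err := conn.Call("RPC.AddSlaveToPod", req, &added); err != nil {
		log.Fatal("AddSlaveToPod failed: ", err)
	}
	log.Printf("slave added: %t", added)
}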
Example #2
func init() {
	client, err := rclient.DialWithConfig(&rclient.DialConfig{network, address, db, password, timeout, maxidle})
	if err != nil {
		panic(err)
	}
	r = client
}
Example #3
func (RedisConnection) GetConnection(protocol, uri string) (RedisClient, error) {
	client, err := client.DialWithConfig(&client.DialConfig{
		Network: protocol,
		Address: uri,
		Timeout: RedisConnectionTimeoutPeriod})
	return client, err
}
Example #4
// Info is deprecated in favor of the cluster level node routines
func Info(c web.C, w http.ResponseWriter, r *http.Request) {
	var response InfoResponse
	target := c.URLParams["targetAddress"]
	section := c.URLParams["section"]
	_ = section // parsed but currently unused
	conn, err := client.DialWithConfig(&client.DialConfig{Address: target})

	if err != nil {
		response.Status = "CONNECTIONERROR"
		response.StatusMessage = "Unable to connect to specified Redis instance"
		// fall through to the single JSON write below rather than
		// double-writing the response
	} else {
		defer conn.ClosePool()
		info, err := conn.Info()
		if err != nil {
			response.Status = "COMMANDERROR"
			response.StatusMessage = err.Error()
		} else {

			response.Data = info
			response.Status = "SUCCESS"
		}
	}
	packed, err := json.Marshal(response)
	if err != nil {
		log.Printf("JSON Marshalling Error: %s", err)
	}
	w.Write(packed)
}
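Both branches above funnel into a single JSON write; a small helper (hypothetical, not part of the original handlers) that also sets the Content-Type header could be shared by these endpoints:

// writeJSON marshals v and writes it with an explicit JSON Content-Type.
// Sketch only; the error wording is illustrative.
func writeJSON(w http.ResponseWriter, v interface{}) {
	packed, err := json.Marshal(v)
	if err != nil {
		log.Printf("JSON Marshalling Error: %s", err)
		http.Error(w, "internal marshalling error", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(packed)
}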
Example #5
File: main.go Project: ngaut/libredis
func init() {
	client, err := rclient.DialWithConfig(&rclient.DialConfig{Address: address})
	if err != nil {
		panic(err)
	}
	r = client
}
Example #6
func LoadNodeFromHostPort(ip string, port int, authtoken string) (node *RedisNode, err error) {
	name := fmt.Sprintf("%s:%d", ip, port)
	node = &RedisNode{Name: name, Address: ip, Port: port, Auth: authtoken}
	node.LastUpdateValid = false
	// Zero length with capacity 5, so later appends don't leave nil entries.
	node.Slaves = make([]*RedisNode, 0, 5)

	conn, err := client.DialWithConfig(&client.DialConfig{Address: name, Password: authtoken, Timeout: 2 * time.Second})
	if err != nil {
		log.Printf("Failed connection to %s:%d. Error:%s", ip, port, err.Error())
		log.Printf("NODE: %+v", node)
		return node, err
	}
	defer conn.ClosePool()

	node.Connected = true
	nodeInfo, err := conn.Info()
	if err != nil {
		log.Printf("WARNING: NODE '%s' was unable to return Info(). Error='%s'", name, err)
	}
	if nodeInfo.Server.Version == "" {
		log.Printf("WARNING: NODE '%s' was unable to return Info(). Error=NONE", name)
	}
	node.HasValidAuth = true
	_, err = node.UpdateData()
	if err != nil {
		log.Printf("Node %s has invalid state. Err from UpdateData call: %s", node.Name, err)
		return node, err
	}
	node.Info = nodeInfo
	//log.Printf("node: %+v", node)
	return node, nil
}
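A short usage sketch for LoadNodeFromHostPort with illustrative values; note it returns the partially populated node alongside the error on failure:

node, err := LoadNodeFromHostPort("10.0.0.5", 6379, "secret") // example host/auth
if err != nil {
	log.Printf("node %s unreachable or in invalid state: %s", node.Name, err)
	return
}
log.Printf("node %s is running Redis version %s", node.Name, node.Info.Server.Version)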
Example #7
func SetRedisConnection(ip string, port int, auth string) (err error) {
	if auth > "" {
		redisconn, err = client.DialWithConfig(&client.DialConfig{Address: fmt.Sprintf("%s:%d", ip, port), Password: auth})
	} else {
		redisconn, err = client.Dial(ip, port)
	}
	return err
}
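The fmt.Sprintf address construction above works for IPv4; a hedged alternative inside the same function using the standard library's net.JoinHostPort would also bracket IPv6 literals correctly:

// Equivalent to fmt.Sprintf("%s:%d", ip, port) for IPv4, but yields
// "[::1]:6379"-style addresses for IPv6 literals as well.
addr := net.JoinHostPort(ip, strconv.Itoa(port))
redisconn, err = client.DialWithConfig(&client.DialConfig{Address: addr, Password: auth})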
Example #8
func InitializeRedisClient(address, auth string) (err error) {
	rediscon, err = client.DialWithConfig(&client.DialConfig{Address: address, Password: auth})
	if err != nil {
		log.Print("Failed InitializeRedisClient with err: ", err.Error())
		return err
	}
	redisInitialized = true
	return nil
}
Example #9
func NewRedisClient(ip string) *RedisClient {
	config := DefaultRedisConfig(ip)
	client, err := libredis.DialWithConfig(config)
	if err != nil {
		panic(err)
	}

	return &RedisClient{client}
}
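Because panicking in a constructor forces callers to recover, an error-returning variant may be preferable; a sketch (the function name is hypothetical):

// NewRedisClientErr mirrors NewRedisClient but propagates the dial error
// instead of panicking.
func NewRedisClientErr(ip string) (*RedisClient, error) {
	config := DefaultRedisConfig(ip)
	c, err := libredis.DialWithConfig(config)
	if err != nil {
		return nil, err
	}
	return &RedisClient{c}, nil
}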
Example #10
// AddSlaveHTMLProcessor is the action target for the AddSlaveHTML form
func AddSlaveHTMLProcessor(c web.C, w http.ResponseWriter, r *http.Request) {
	r.ParseForm()
	log.Print("add slave processor called")
	podname := c.URLParams["podName"]
	context, err := NewPageContext()
	checkContextError(err, &w)
	pod, _ := context.Constellation.GetPod(podname)
	context.Title = "Pod Slave Result"
	context.ViewTemplate = "slave-added"
	context.Pod = pod
	context.Refresh = true
	context.RefreshURL = fmt.Sprintf("/pod/%s", pod.Name)
	context.RefreshTime = 5
	address := r.FormValue("host")
	sname := r.FormValue("sname")
	portstr := r.FormValue("port")
	slaveauth := r.FormValue("authtoken")
	port, _ := strconv.Atoi(portstr)

	type results struct {
		PodName      string
		SlaveName    string
		SlaveAddress string
		SlavePort    int
		Error        string
		HasError     bool
		PodURL       string
	}
	res := results{PodName: podname, SlaveName: sname, SlaveAddress: address, SlavePort: port}
	name := fmt.Sprintf("%s:%d", address, port)
	slave_target, err := client.DialWithConfig(&client.DialConfig{Address: name, Password: slaveauth})
	if err != nil {
		log.Print("ERR: Dialing slave -", err)
		context.Data = err
		render(w, context)
		return
	}
	// Defer only after a successful dial to avoid closing a nil client.
	defer slave_target.ClosePool()
	err = slave_target.SlaveOf(pod.Info.IP, fmt.Sprintf("%d", pod.Info.Port))
	if err != nil {
		log.Printf("Err: %v", err)
		res.Error = err.Error()
		res.HasError = true
	} else {
		log.Printf("Slave added success")
		slave_target.ConfigSet("masterauth", pod.AuthToken)
		slave_target.ConfigSet("requirepass", pod.AuthToken)
		slave, err := actions.LoadNodeFromHostPort(address, port, pod.AuthToken)
		if err != nil {
			log.Printf("In AddSlaveHTMLProcessor, unable to get new slave node")
		} else {
			pod.Master.Slaves = append(pod.Master.Slaves, slave)
		}
	}
	context.Data = res
	render(w, context)

}
Example #11
func (n *RedisNode) Ping() bool {
	dconf := client.DialConfig{Address: n.Name, Password: n.Auth, Network: "tcp", Timeout: DialTimeout}
	conn, err := client.DialWithConfig(&dconf)
	if err != nil {
		return false
	}
	// Release the pooled connection; the original leaked it.
	defer conn.ClosePool()
	return conn.Ping() == nil
}
Example #12
// LiveSlaves returns a list of connections to slaves. It can be empty if no
// slaves exist or no slaves are reachable. Callers own the returned
// connections and should close them when done.
func LiveSlaves(pod parser.PodConfig) []*client.Redis {
	slaves := pod.KnownSlaves
	var live []*client.Redis
	for _, s := range slaves {
		sc, err := client.DialWithConfig(&client.DialConfig{Address: s, Password: pod.Authpass})
		if err != nil {
			log.Print(err.Error())
			continue
		}
		live = append(live, sc)
	}
	return live
}
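Since LiveSlaves returns open pooled connections, callers should close them when finished; a minimal consumption sketch (pod is assumed to be a parser.PodConfig):

for _, sc := range LiveSlaves(pod) {
	if err := sc.Ping(); err != nil {
		log.Printf("slave %s failed ping: %s", sc.Address(), err)
	}
	sc.ClosePool() // release the pooled connection
}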
Example #13
// APIAddSlave is the API call handler for adding a slave
func APIAddSlave(c web.C, w http.ResponseWriter, r *http.Request) {
	target := c.URLParams["podName"]
	context, err := NewPageContext()
	checkContextError(err, &w)
	pod, _ := context.Constellation.GetPod(target)
	body, err := ioutil.ReadAll(r.Body)
	var response InfoResponse
	var reqdata common.AddSlaveRequest
	err = json.Unmarshal(body, &reqdata)
	if err != nil {
		retcode, em := throwJSONParseError(r)
		log.Print(em)
		http.Error(w, em, retcode)
		return
	}
	reqdata.Podname = target
	name := fmt.Sprintf("%s:%d", reqdata.SlaveAddress, reqdata.SlavePort)
	slave_target, err := client.DialWithConfig(&client.DialConfig{Address: name, Password: reqdata.SlaveAuth})
	if err != nil {
		log.Print("ERR: Dialing slave -", err)
		response.Status = "ERROR"
		response.StatusMessage = "Unable to connect and command slave"
		http.Error(w, "Unable to contact slave", 400)
		return
	}
	// Defer only after a successful dial to avoid closing a nil client.
	defer slave_target.ClosePool()
	err = slave_target.SlaveOf(pod.Info.IP, fmt.Sprintf("%d", pod.Info.Port))
	if err != nil {
		log.Printf("Err: %v", err)
		if strings.Contains(err.Error(), "Already connected to specified master") {
			response.Status = "NOOP"
			response.StatusMessage = "Already connected to specified master"
			packed, _ := json.Marshal(response)
			w.Write(packed)
			return
		}
	}

	pod.Master.LastUpdateValid = false
	context.Constellation.PodMap[pod.Name] = pod
	slave_target.ConfigSet("masterauth", pod.AuthToken)
	slave_target.ConfigSet("requirepass", pod.AuthToken)
	response.Status = "COMPLETE"
	response.StatusMessage = "Slave added"
	packed, _ := json.Marshal(response)
	w.Write(packed)

}
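A client-side sketch for exercising APIAddSlave; the route and values are assumptions, while the request fields match common.AddSlaveRequest as the handler unmarshals it:

// Hypothetical caller; the handler fills in Podname from the URL.
reqdata := common.AddSlaveRequest{SlaveAddress: "10.0.0.5", SlavePort: 6379, SlaveAuth: "secret"}
body, _ := json.Marshal(reqdata)
resp, err := http.Post("http://localhost:8000/api/pod/pod1/slave", "application/json", bytes.NewReader(body))
if err != nil {
	log.Fatal(err)
}
defer resp.Body.Close()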
Example #14
// AddSlaveHTMLProcessor is the action target for the AddSlaveHTML form
func AddSlaveHTMLProcessor(c web.C, w http.ResponseWriter, r *http.Request) {
	r.ParseForm()
	log.Print("add slave processor called")
	podname := c.URLParams["podName"]
	pod, _ := ManagedConstellation.GetPod(podname)
	context := PageContext{Title: "Pod Slave Result", ViewTemplate: "slave-added", Constellation: ManagedConstellation, Pod: pod}
	context.Refresh = true
	context.RefreshURL = fmt.Sprintf("/pod/%s", pod.Name)
	context.RefreshTime = 5
	address := r.FormValue("host")
	sname := r.FormValue("sname")
	portstr := r.FormValue("port")
	slaveauth := r.FormValue("authtoken")
	port, _ := strconv.Atoi(portstr)

	type results struct {
		PodName      string
		SlaveName    string
		SlaveAddress string
		SlavePort    int
		Error        string
		HasError     bool
		PodURL       string
	}
	res := results{PodName: podname, SlaveName: sname, SlaveAddress: address, SlavePort: port}
	name := fmt.Sprintf("%s:%d", address, port)
	slave_target, err := client.DialWithConfig(&client.DialConfig{Address: name, Password: slaveauth})
	if err != nil {
		log.Print("ERR: Dialing slave -", err)
		context.Data = err
		render(w, context)
		return
	}
	// Defer only after a successful dial to avoid closing a nil client.
	defer slave_target.ClosePool()
	err = slave_target.SlaveOf(pod.Info.IP, fmt.Sprintf("%d", pod.Info.Port))
	if err != nil {
		log.Printf("Err: %v", err)
	}
	slave_target.ConfigSet("masterauth", pod.AuthToken)
	slave_target.ConfigSet("requirepass", pod.AuthToken)
	context.Data = res
	render(w, context)

}
Example #15
// CheckAuth attempts to connect to the master and validate that we can auth
// by issuing a ping.
func CheckAuth(pod *parser.PodConfig) (map[string]bool, error) {
	addr := fmt.Sprintf("%s:%s", pod.MasterIP, pod.MasterPort)
	results := make(map[string]bool)
	invalid := false
	dc := client.DialConfig{Address: addr, Password: pod.Authpass}
	c, err := client.DialWithConfig(&dc)
	if err != nil {
		if !strings.Contains(err.Error(), "invalid password") {
			log.Printf("Unable to connect to %s. Error: %s", addr, err.Error())
		}
		results["master"] = false
		invalid = true
	} else {
		err = c.Ping()
		if err != nil {
			log.Print(err)
			results["master"] = false
			invalid = true
		} else {
			results["master"] = true
		}
	}

	for _, slave := range LiveSlaves(*pod) {
		sid := slave.Address()
		if slave.Ping() != nil {
			results[sid] = false
			invalid = true
		} else {
			results[sid] = true
		}
	}
	if invalid {
		err = errors.New("At least one node in pod failed auth check")
	}
	return results, err
}
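A usage sketch for CheckAuth; the results map is keyed by "master" plus one "ip:port" entry per live slave:

results, err := CheckAuth(&pod) // pod is a parser.PodConfig
if err != nil {
	log.Printf("auth check: %s", err)
}
for node, ok := range results {
	log.Printf("auth for %s ok: %t", node, ok)
}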
Example #16
// LoadLocalPods uses the PodConfigs read from the sentinel config file and
// talks to the local sentinel to develop the list of pods the local sentinel
// knows about.
func (c *Constellation) LoadLocalPods() error {
	if c.LocalPodMap == nil {
		c.LocalPodMap = make(map[string]*RedisPod)
	}
	if c.RemotePodMap == nil {
		c.RemotePodMap = make(map[string]*RedisPod)
	}
	// Initialize local sentinel
	if c.LocalSentinel.Name == "" {
		log.Print("Initializing LOCAL sentinel")
		var address string
		var err error
		if c.SentinelConfig.Host == "" {
			log.Print("No Hostname, determining local hostname")
			myhostname, err := os.Hostname()
			if err != nil {
				log.Print(err)
			}
			myip, err := net.LookupHost(myhostname)
			if err != nil {
				log.Fatal(err)
			}
			c.LocalSentinel.Host = myip[0]
			c.SentinelConfig.Host = myip[0]
			log.Printf("%+v", myip)
			address = fmt.Sprintf("%s:%d", myip[0], c.SentinelConfig.Port)
			c.LocalSentinel.Name = address
			log.Printf("Determined LOCAL address is: %s", address)
			log.Printf("Determined LOCAL name is: %s", c.LocalSentinel.Name)
		} else {
			address = fmt.Sprintf("%s:%d", c.SentinelConfig.Host, c.SentinelConfig.Port)
			log.Printf("Determined LOCAL address is: %s", address)
			c.LocalSentinel.Name = address
			log.Printf("Determined LOCAL name is: %s", c.LocalSentinel.Name)
		}
		c.LocalSentinel.Host = c.SentinelConfig.Host
		c.LocalSentinel.Port = c.SentinelConfig.Port
		c.LocalSentinel.Connection, err = client.DialWithConfig(&client.DialConfig{Address: address})
		if err != nil {
			// Handle error reporting here!
			//log.Printf("SentinelConfig=%+v", c.SentinelConfig)
			log.Fatalf("LOCAL Sentinel '%s' failed connection attempt", c.LocalSentinel.Name)
		}
		c.LocalSentinel.Info, _ = c.LocalSentinel.Connection.SentinelInfo()
	}
	for pname, pconfig := range c.SentinelConfig.ManagedPodConfigs {
		mi, err := c.LocalSentinel.GetMaster(pname)
		if err != nil {
			log.Printf("WARNING: Pod '%s' in config but not found when talking to the sentinel controller. Err: '%s'", pname, err)
			continue
		}
		address := fmt.Sprintf("%s:%d", mi.Host, mi.Port)
		_, err = c.GetNode(address, pname, pconfig.AuthToken)
		if err != nil {
			log.Printf("Was unable to get node '%s' for pod '%s' with auth '%s'", address, pname, pconfig.AuthToken)
		}
		pod, err := c.LocalSentinel.GetPod(pname)
		if err != nil {
			log.Printf("ERROR: No pod found on LOCAL sentinel for %s", pname)
			continue
		}
		c.LocalPodMap[pod.Name] = &pod
		c.LoadNodesForPod(&pod, &c.LocalSentinel)
	}

	log.Print("Done with LocalSentinel initialization")
	return nil
}
Example #17
// UpdateData will check if an update is needed, and update if so. It returns a
// boolean indicating if an update was done and an err.
func (n *RedisNode) UpdateData() (bool, error) {
	// If the last update was successful and it has been less than
	// NodeRefreshInterval seconds, don't bother.
	if n == nil {
		log.Print("WTF?! a nil node?")
		return false, errors.New("Node given does not exist in the system. Somewhere there is a bug.")
	}
	if n.LastUpdateValid {
		elapsed := time.Since(n.LastUpdate)
		if elapsed.Seconds() < NodeRefreshInterval {
			n.LastUpdateDelay = time.Since(n.LastUpdate)
			return false, nil
		}
	}
	dconf := client.DialConfig{Address: n.Name, Password: n.Auth, Network: "tcp", Timeout: DialTimeout}
	conn, err := client.DialWithConfig(&dconf)
	//deadline := time.Now().Add(DialTimeout)
	if err != nil {
		log.Print("unable to connect to node. Err:", err)
		n.LastUpdateValid = false
		n.LastUpdateDelay = time.Since(n.LastUpdate)
		return false, err
	}
	defer conn.ClosePool()
	nodeinfo, err := conn.Info()
	if err != nil {
		log.Print("Info error on node. Err:", err)
		n.LastUpdateValid = false
		n.LastUpdateDelay = time.Since(n.LastUpdate)
		return false, err
	}
	n.LastUpdate = time.Now()
	if nodeinfo.Server.Version == "" {
		log.Print("WARNING: Unable to get INFO or node!")
		n.LastUpdateValid = false
		n.LastUpdateDelay = time.Since(n.LastUpdate)
		return false, fmt.Errorf("Unable to pull valid INFO for %s", n.Name)
	}

	n.Info = nodeinfo
	res, _ := conn.ConfigGet("maxmemory")
	// Atoi returns 0 on a parse failure, which is treated below as "no maxmemory set".
	maxmem, _ := strconv.Atoi(res["maxmemory"])
	n.MaxMemory = maxmem
	uptime := time.Duration(n.Info.Server.UptimeInSeconds) * time.Second
	n.LastStart = time.Now().Add(-uptime)

	cfg, err := conn.ConfigGet("save")
	if err != nil {
		log.Print("Unable to get 'save' from config call")
	}
	does_save := cfg["save"]
	if len(does_save) != 0 {
		n.SaveEnabled = true
	}
	n.AOFEnabled = n.Info.Persistence.AOFEnabled
	if n.MaxMemory == 0 {
		n.PercentUsed = 100.0
		n.MemoryUseCritical = true
	} else {
		n.PercentUsed = (float64(n.Info.Memory.UsedMemory) / float64(n.MaxMemory)) * 100.0
		if n.PercentUsed >= 80 {
			n.MemoryUseCritical = true
		} else if n.PercentUsed >= 60 {
			n.MemoryUseWarn = true
		}
	}

	// Pull Latency data
	res, _ = conn.ConfigGet("latency-monitor-threshold")
	n.LatencyThreshold, err = strconv.Atoi(res["latency-monitor-threshold"])
	if err == nil && n.LatencyThreshold > 0 {
		n.LatencyHistory, _ = conn.LatencyHistory("command")
		n.LatencyHistoryFastCommand, _ = conn.LatencyHistory("fast-command")
		n.LatencyDoctor, _ = conn.LatencyDoctor()
		n.LatencyMonitoringEnabled = true
	}

	// Pull slowlog data
	res, _ = conn.ConfigGet("slowlog-log-slower-than")
	n.SlowLogThreshold, err = strconv.ParseInt(res["slowlog-log-slower-than"], 0, 64)
	n.SlowLogLength, _ = conn.SlowLogLen()
	n.SlowLogRecords, _ = conn.SlowLogGet(n.SlowLogLength)

	var slavenodes []*RedisNode
	for _, slave := range n.Info.Replication.Slaves {
		snode, err := LoadNodeFromHostPort(slave.IP, slave.Port, n.Auth)
		if err != nil {
			log.Printf("Unable to load node from %s:%d. Error:%s", slave.IP, slave.Port, err)
			continue
		}
		slavenodes = append(slavenodes, snode)
	}
	n.Slaves = slavenodes
	n.LastUpdateValid = true
	n.LastUpdate = time.Now()
	n.LastUpdateDelay = time.Since(n.LastUpdate)
	NodesMap[n.Name] = n
	return true, nil
}
Example #18
// AddSentinel adds a sentinel to the constellation
func (c *Constellation) AddSentinel(ip string, port int) error {
	if c.LocalSentinel.Name == "" {
		log.Print("Initializing LOCAL sentinel")
		if c.SentinelConfig.Host == "" {
			myhostname, err := os.Hostname()
			if err != nil {
				log.Print(err)
			}
			myip, err := net.LookupHost(myhostname)
			if err != nil {
				log.Print(err)
			}
			c.LocalSentinel.Host = myip[0]
		}
		address := fmt.Sprintf("%s:%d", c.SentinelConfig.Host, c.SentinelConfig.Port)
		c.LocalSentinel.Name = address
		var err error
		c.LocalSentinel.Connection, err = client.DialWithConfig(&client.DialConfig{Address: address})
		if err != nil {
			// Handle error reporting here! I don't think we want to do a
			// fatal here anymore.
			log.Fatalf("LOCAL Sentinel '%s' failed connection attempt", c.LocalSentinel.Name)
		}
		c.LocalSentinel.Info, _ = c.LocalSentinel.Connection.SentinelInfo()
	}
	var sentinel Sentinel
	if port == 0 {
		err := fmt.Errorf("AddSentinel called w/ZERO port .. wtf, man?")
		return err
	}
	address := fmt.Sprintf("%s:%d", ip, port)
	log.Printf("*****************] Local Name: %s Add Called For: %s", c.LocalSentinel.Name, address)
	if address == c.LocalSentinel.Name {
		return nil
	}
	_, exists := c.RemoteSentinels[address]
	if exists {
		return nil
	}
	_, exists = c.BadSentinels[address]
	if exists {
		return nil
	}
	// Now to add to the PeerList for GroupCache
	// For now we are using just the IP and expect port 8000 by convention
	// This will change to serf/consul when that part is added I expect
	if c.PeerList == nil {
		c.PeerList = make(map[string]string)
	}
	_, exists = c.PeerList[address]
	if !exists {
		log.Print("New Peer: ", address)
		c.PeerList[address] = ip
		c.SetPeers()
	}
	sentinel.Name = address
	sentinel.Host = ip
	sentinel.Port = port
	_, known := c.RemoteSentinels[address]
	if known {
		log.Printf("Already have crawled '%s'", sentinel.Name)
	} else {
		log.Printf("Adding REMOTE Sentinel '%s'", address)
		conn, err := client.DialWithConfig(&client.DialConfig{Address: address})
		if err != nil {
			// Handle error reporting here!
			err = fmt.Errorf("AddSentinel -> '%s' failed connection attempt", address)
			c.BadSentinels[address] = &sentinel
			return err
		}
		sentinel.Connection = conn
		sentinel.Info, _ = sentinel.Connection.SentinelInfo()
		if address != c.LocalSentinel.Name {
			log.Print("discovering pods on remote sentinel " + sentinel.Name)
			sentinel.LoadPods()
			pods, _ := sentinel.GetPods()
			log.Printf(" %d Pods to load from %s ", len(pods), address)
			c.RemoteSentinels[address] = &sentinel
			for _, pod := range pods {
				if pod.Name == "" {
					log.Print("WUT: Have a nameless pod. This is probably a bug.")
					continue
				}
				_, islocal := c.LocalPodMap[pod.Name]
				if islocal {
					log.Print("Skipping local pod")
					continue
				}
				_, isremote := c.RemotePodMap[pod.Name]
				if isremote {
					log.Print("Skipping known remote pod")
					continue
				}
				log.Print("Adding DISCOVERED remotely managed pod " + pod.Name)
				c.GetPodAuth(pod.Name)
				log.Print("Got auth for pod")
				c.LoadNodesForPod(&pod, &sentinel)
				newsentinels, _ := sentinel.GetSentinels(pod.Name)
				pod.SentinelCount = len(newsentinels)
				c.PodToSentinelsMap[pod.Name] = newsentinels
				c.RemotePodMap[pod.Name] = &pod
				for _, ns := range newsentinels {
					_, known := c.RemoteSentinels[ns.Name]
					if known {
						continue
					}
					if ns.Name == c.LocalSentinel.Name || ns.Name == sentinel.Name {
						continue
					}
					c.AddSentinelByAddress(ns.Name)
				}
			}
		}
	}
	return nil
}
Example #19
// UpdateData will check if an update is needed, and update if so. It returns a
// boolean indicating if an update was done and an err.
func (n *RedisNode) UpdateData() (bool, error) {
	// If the last update was successful and it has been less than 90 seconds,
	// don't bother.
	if n.LastUpdateValid {
		elapsed := time.Since(n.LastUpdate)
		if elapsed.Seconds() < 90 {
			n.LastUpdateDelay = time.Since(n.LastUpdate)
			return false, nil
		}
	}
	dconf := client.DialConfig{Address: n.Name, Password: n.Auth, Network: "tcp"}
	conn, err := client.DialWithConfig(&dconf)
	if err != nil {
		log.Print("unable to connect to node. Err:", err)
		n.LastUpdateValid = false
		n.LastUpdateDelay = time.Since(n.LastUpdate)
		return false, err
	}
	defer conn.ClosePool()
	pinged := conn.Ping()
	if pinged != nil {
		err = fmt.Errorf("Unable to PING node %s with config %+v. ERROR: %s", n.Name, dconf, pinged)
		n.LastUpdateValid = false
		n.LastUpdateDelay = time.Since(n.LastUpdate)
		return false, err
	}
	nodeinfo, err := conn.Info()
	if err != nil {
		log.Print("Info error on node. Err:", err)
		n.LastUpdateValid = false
		n.LastUpdateDelay = time.Since(n.LastUpdate)
		return false, err
	}
	if nodeinfo.Server.Version == "" {
		log.Print("WARNING: Unable to get INFO or node!")
		log.Printf("Pulled: %+v", nodeinfo)
		n.LastUpdateValid = false
		n.LastUpdateDelay = time.Since(n.LastUpdate)
		return false, fmt.Errorf("Info() was blank, no errors")
	}

	n.Info = nodeinfo
	res, _ := conn.ConfigGet("maxmemory")
	// Atoi returns 0 on a parse failure, which is treated below as "no maxmemory set".
	maxmem, _ := strconv.Atoi(res["maxmemory"])
	n.MaxMemory = maxmem
	uptime := time.Duration(n.Info.Server.UptimeInSeconds) * time.Second
	n.LastStart = time.Now().Add(-uptime)

	cfg, _ := conn.ConfigGet("save")
	does_save := cfg["save"]
	if len(does_save) != 0 {
		n.SaveEnabled = true
	}
	n.AOFEnabled = n.Info.Persistence.AOFEnabled
	if n.MaxMemory == 0 {
		n.PercentUsed = 100.0
		n.MemoryUseCritical = true
	} else {
		n.PercentUsed = (float64(n.Info.Memory.UsedMemory) / float64(n.MaxMemory)) * 100.0
		if n.PercentUsed >= 80 {
			n.MemoryUseCritical = true
		} else if n.PercentUsed >= 60 {
			n.MemoryUseWarn = true
		}
	}

	// Pull Latency data
	res, _ = conn.ConfigGet("latency-monitor-threshold")
	n.LatencyThreshold, err = strconv.Atoi(res["latency-monitor-threshold"])
	if err == nil && n.LatencyThreshold > 0 {
		n.LatencyHistory, _ = conn.LatencyHistory("command")
		n.LatencyDoctor, _ = conn.LatencyDoctor()
		n.LatencyMonitoringEnabled = true
	}

	// Pull slowlog data
	res, _ = conn.ConfigGet("slowlog-log-slower-than")
	n.SlowLogThreshold, err = strconv.ParseInt(res["slowlog-log-slower-than"], 0, 64)
	n.SlowLogLength, _ = conn.SlowLogLen()
	n.SlowLogRecords, _ = conn.SlowLogGet(n.SlowLogLength)
	log.Printf("Slowlog: %+v", n.SlowLogRecords)
	for _, r := range n.SlowLogRecords {
		log.Printf("slow: %+v", r)
	}

	var slavenodes []*RedisNode
	for _, slave := range n.Info.Replication.Slaves {
		snode, err := LoadNodeFromHostPort(slave.IP, slave.Port, n.Auth)
		if err != nil {
			log.Printf("Unable to load node from %s:%d. Error:%s", slave.IP, slave.Port, err)
			continue
		}
		slavenodes = append(slavenodes, snode)
	}
	n.Slaves = slavenodes
	n.LastUpdateValid = true
	n.LastUpdate = time.Now()
	n.LastUpdateDelay = time.Since(n.LastUpdate)
	return true, nil
}
Example #20
// CloneServer does the heavy lifting to clone one Redis instance to another.
func CloneServer(originHost, cloneHost string, promoteWhenComplete, reconfigureSlaves bool, syncTimeout float64, roleRequired string) (result map[string]string) {
	jobId := uuid.New()
	result = map[string]string{
		"origin":      originHost,
		"clone":       cloneHost,
		"requestTime": fmt.Sprintf("%s", time.Now()),
		"jobid":       jobId,
		"status":      "pending",
		"error":       "",
	}

	if cloneHost == originHost {
		log.Print("Can not clone a host to itself, aborting")
		result["status"] = "ERROR"
		result["error"] = "Can not clone a node to itself"
		return
	}

	// Connect to the Origin node
	originConf := client.DialConfig{Address: originHost}
	origin, err := client.DialWithConfig(&originConf)
	if err != nil {
		log.Println("Unable to connect to origin", err)
		result["status"] = "ERROR"
		result["error"] = "Unable to connect to origin"
		return
	} else {
		log.Print("Connection to origin confirmed")
	}
	// obtain node information
	info, err := origin.Info()
	if err != nil {
		log.Printf("Unable to get the role of the origin instance")
		result["status"] = "ERROR"
		result["error"] = "Unable to get replication role for origin"
		return
	}
	role := info.Replication.Role

	log.Print("Role:", role)
	// verify the role we get matches our condition for a backup
	switch role {
	case roleRequired:
		log.Print("acceptable role confirmed, now to perform a clone...")
	default:
		log.Print("Role mismatch, no clone will be performed")
		result["status"] = "ERROR"
		result["error"] = "Role requirement not met"
		return
	}
	// Now connect to the clone ...
	cloneConf := client.DialConfig{Address: cloneHost}
	clone, err := client.DialWithConfig(&cloneConf)
	if err != nil {
		log.Println("Unable to connect to clone")
		result["status"] = "ERROR"
		result["error"] = "Unable to connect to clone target"
		return
	} else {
		log.Print("Connection to clone confirmed")
	}
	clone.Info()

	oconfig, err := origin.ConfigGet("*")
	if err != nil {
		log.Println("Unable to get origin config, aborting on err:", err)
		result["status"] = "ERROR"
		result["error"] = "Unable to get config from origin"
		return
	}
	// OK, now we are ready to start cloning
	log.Print("Cloning config")
	for k, v := range oconfig {
		// slaveof is not clone-able and is set separately, so skip it
		if k == "slaveof" {
			continue
		}
		err := clone.ConfigSet(k, v)
		if err != nil {
			if !strings.Contains(err.Error(), "Unsupported CONFIG parameter") {
				log.Printf("Unable to set key '%s' to val '%s' on clone due to Error '%s'\n", k, v, err)
			}
		}
	}
	log.Print("Config cloned, now syncing data")
	switch role {
	case "slave":
		// If we are cloning a slave we are assuming it needs to look just like
		// the others, so we simply clone the settings and slave it to the
		// origin's master
		slaveof := strings.Split(oconfig["slaveof"], " ")
		log.Printf("Need to set clone to slave to %s on port %s\n", slaveof[0], slaveof[1])
		slaveres := clone.SlaveOf(slaveof[0], slaveof[1])
		if slaveres != nil {
			log.Printf("Unable to clone slave setting! Error: '%s'\n", slaveres)
			result["status"] = "ERROR"
			result["error"] = "Unable to set slaveof on clone"
		} else {
			log.Print("Successfully cloned new slave")
			result["status"] = "Complete"
		}
		return
	case "master":
		// master clones can get tricky.
		// First, slave to the origin node to get a copy of the data
		log.Print("Role being cloned is 'master'")
		log.Print("First, we need to slave to the original master to pull data down")
		slaveof := strings.Split(originHost, ":")
		slaveres := clone.SlaveOf(slaveof[0], slaveof[1])
		if slaveres != nil {
			if !strings.Contains(slaveres.Error(), "Already connected") {
				log.Printf("Unable to slave clone to origin! Error: '%s'\n", slaveres)
				log.Print("Aborting clone so you can investigate why.")
				return
			}
		}
		log.Printf("Successfully cloned to %s:%s\n", slaveof[0], slaveof[1])

		new_info, _ := clone.Info()
		syncInProgress := new_info.Replication.MasterSyncInProgress || new_info.Replication.MasterLinkStatus == "down"
		syncTime := 0.0
		if syncInProgress {
			log.Print("Sync in progress...")
			for {
				new_info, _ := clone.Info()
				syncInProgress = new_info.Replication.MasterSyncInProgress || new_info.Replication.MasterLinkStatus == "down"
				if syncInProgress {
					syncTime += .5
					if syncTime >= syncTimeout {
						break
					}
					time.Sleep(time.Duration(500) * time.Millisecond)
				} else {
					break
				}
			}
		}
		if syncInProgress {
			log.Print("Sync took longer than expected, aborting until this is better handled!")
			result["message"] = "Sync in progress"
			return
		}
		// Now we have synced data.
		// Next we need to see if we should promote the new clone to a master
		// this is useful for migrating a master but also for providing a
		// production clone for dev or testing
		log.Print("Now checking for slave promotion")
		if promoteWhenComplete {
			promoted := clone.SlaveOf("no", "one")
			if promoted != nil {
				log.Print("Was unable to promote clone to master, investigate why!")
				return
			}
			log.Print("Promoted clone to master")
			// If we are migrating a master entirely, we want to reconfigure
			// its slaves to point to the new master.
			// While it might make sense to promote the clone after slaving,
			// doing that means writes are lost between slave migration and
			// promotion. This gets tricky, which is why we don't do it by default.
			if !reconfigureSlaves {
				log.Print("Not instructed to promote existing slaves")
				log.Print("Clone complete")
				result["status"] = "Complete"
				return
			} else {
				info, _ := origin.Info()
				slaveof := strings.Split(cloneHost, ":")
				desired_port, _ := strconv.Atoi(slaveof[1])
				for index, data := range info.Replication.Slaves {
					log.Printf("Reconfiguring slave %d/%d\n", index, info.Replication.ConnectedSlaves)
					fmt.Printf("Slave data: %+v\n", data)
					slave_connstring := fmt.Sprintf("%s:%d", data.IP, data.Port)
					slaveconn, err := client.DialWithConfig(&client.DialConfig{Address: slave_connstring})
					if err != nil {
						log.Printf("Unable to connect to slave '%s', skipping", slave_connstring)
						continue
					}
					err = slaveconn.SlaveOf(slaveof[0], slaveof[1])
					if err != nil {
						log.Printf("Unable to slave %s to clone. Err: '%s'", slave_connstring, err)
						continue
					}
					time.Sleep(time.Duration(100) * time.Millisecond) // needed to give the slave time to sync.
					slave_info, _ := slaveconn.Info()
					if slave_info.Replication.MasterHost == slaveof[0] {
						if slave_info.Replication.MasterPort == desired_port {
							log.Printf("Slaved %s to clone", slave_connstring)
						} else {
							log.Print("Hmm, slave settings don't match, look into this on slave", data.IP, data.Port)
						}
					}
				}
				result["status"] = "Complete"
			}
		}
		result["status"] = "Complete"
	}
	return
}
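A short caller sketch for CloneServer; the host strings and the 30-second sync budget are illustrative:

result := CloneServer("10.0.0.5:6379", "10.0.0.9:6379", true, false, 30.0, "master")
if result["status"] != "Complete" {
	log.Printf("clone job %s did not complete: %s", result["jobid"], result["error"])
}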