func (s *TestSuite) TestForgetMaster(c *C) {
	_, _ = inst.ReadTopologyInstance(&masterKey)
	_, found, _ := inst.ReadInstance(&masterKey)
	c.Assert(found, Equals, true)
	inst.ForgetInstance(&masterKey)
	_, found, _ = inst.ReadInstance(&masterKey)
	c.Assert(found, Equals, false)
}
func (s *TestSuite) TestDiscover(c *C) {
	var err error
	_, err = db.ExecOrchestrator("delete from database_instance where hostname = ? and port = ?", masterKey.Hostname, masterKey.Port)
	c.Assert(err, IsNil)
	_, err = db.ExecOrchestrator("delete from database_instance where hostname = ? and port = ?", slave1Key.Hostname, slave1Key.Port)
	c.Assert(err, IsNil)
	_, err = db.ExecOrchestrator("delete from database_instance where hostname = ? and port = ?", slave2Key.Hostname, slave2Key.Port)
	c.Assert(err, IsNil)
	_, err = db.ExecOrchestrator("delete from database_instance where hostname = ? and port = ?", slave3Key.Hostname, slave3Key.Port)
	c.Assert(err, IsNil)

	_, found, _ := inst.ReadInstance(&masterKey)
	c.Assert(found, Equals, false)

	_, _ = inst.ReadTopologyInstance(&slave1Key)
	orchestrator.StartDiscovery(slave1Key)
	_, found, err = inst.ReadInstance(&slave1Key)
	c.Assert(found, Equals, true)
	c.Assert(err, IsNil)
}
func (s *TestSuite) TestMakeCoMasterAndBackAndFailOthersToBecomeCoMasters(c *C) {
	clearTestMaintenance()

	slave1, err := inst.MakeCoMaster(&slave1Key)
	c.Assert(err, IsNil)

	// Now master & slave1 are expected to be co-masters. Check!
	master, _, _ := inst.ReadInstance(&masterKey)
	c.Assert(master.MasterKey.Port, Not(Equals), inst.InvalidPort)
	c.Assert(master.IsSlaveOf(slave1), Equals, true)
	c.Assert(slave1.IsSlaveOf(master), Equals, true)

	// Verify we can't have additional co-masters
	_, err = inst.MakeCoMaster(&masterKey)
	c.Assert(err, Not(IsNil))
	_, err = inst.MakeCoMaster(&slave1Key)
	c.Assert(err, Not(IsNil))
	_, err = inst.MakeCoMaster(&slave2Key)
	c.Assert(err, Not(IsNil))

	// Detach - restore to original state
	master, err = inst.DetachSlaveFromMaster(&masterKey)
	c.Assert(err, IsNil)
	c.Assert(master.MasterKey.Port, Equals, inst.InvalidPort)
}
func (s *TestSuite) TestReadTopologyAndInstanceSlave(c *C) {
	i, _ := inst.ReadTopologyInstance(&slave1Key)
	iRead, found, _ := inst.ReadInstance(&slave1Key)
	c.Assert(found, Equals, true)
	c.Assert(iRead.Key.Hostname, Equals, i.Key.Hostname)
	c.Assert(iRead.Version, Equals, i.Version)
}
func (s *TestSuite) TestReadTopologyAndInstanceMaster(c *C) {
	i, _ := inst.ReadTopologyInstance(&masterKey)
	iRead, found, _ := inst.ReadInstance(&masterKey)
	c.Assert(found, Equals, true)
	c.Assert(iRead.Key.Hostname, Equals, i.Key.Hostname)
	c.Assert(iRead.Version, Equals, i.Version)
	c.Assert(len(iRead.SlaveHosts), Equals, len(i.SlaveHosts))
}
// Instance reads and returns an instance's details.
func (this *HttpAPI) Instance(params martini.Params, r render.Render) {
	instanceKey, err := this.getInstanceKey(params["host"], params["port"])
	if err != nil {
		r.JSON(200, &APIResponse{Code: ERROR, Message: err.Error()})
		return
	}
	instance, found, err := inst.ReadInstance(&instanceKey)
	if !found || err != nil {
		r.JSON(200, &APIResponse{Code: ERROR, Message: fmt.Sprintf("Cannot read instance: %+v", instanceKey)})
		return
	}
	r.JSON(200, instance)
}
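The handler above relies on martini injecting the route params and the renderer at call time. Below is a minimal wiring sketch, not taken from the orchestrator source: it assumes the same martini and render imports as the handler above, and the RegisterInstanceRoute helper name and the route pattern are illustrative assumptions.

// RegisterInstanceRoute is a hypothetical helper showing how a handler with the
// Instance(params martini.Params, r render.Render) signature could be mounted.
// The "/api/instance/:host/:port" pattern is an assumption for this sketch.
func RegisterInstanceRoute(m *martini.ClassicMartini, api *HttpAPI) {
	// render.Renderer() makes a render.Render available for injection into handlers.
	m.Use(render.Renderer())
	// ":host" and ":port" surface as params["host"] and params["port"] inside Instance.
	m.Get("/api/instance/:host/:port", api.Instance)
}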
// DiscoverInstance will attempt to discover an instance (unless it is already up to date) and will
// enumerate its master and slaves (if any) for further discovery.
func DiscoverInstance(instanceKey inst.InstanceKey) {
	instanceKey.Formalize()
	if !instanceKey.IsValid() {
		return
	}

	instance, found, err := inst.ReadInstance(&instanceKey)

	if found && instance.IsUpToDate && instance.IsLastCheckValid {
		// we've already discovered this one. Skip!
		goto Cleanup
	}
	// First we've ever heard of this instance. Continue investigation:
	instance, err = inst.ReadTopologyInstance(&instanceKey)
	// panic can occur (IO stuff). Therefore it may happen
	// that instance is nil. Check it.
	if err != nil || instance == nil {
		goto Cleanup
	}

	fmt.Printf("host: %+v, master: %+v\n", instance.Key, instance.MasterKey)

	// Investigate slaves:
	for _, slaveKey := range instance.SlaveHosts.GetInstanceKeys() {
		discoveryInstanceKeys <- slaveKey
	}
	// Investigate master:
	discoveryInstanceKeys <- instance.MasterKey

Cleanup:
	return
}

// StartDiscovery begins a one-time asynchronous discovery process for the given
// instance and all of its topology-connected instances.
// That is, the instance will be investigated for master and slaves, and the routines will follow on
// each and every such found master/slave.
// In essence, assuming all slaves in a replication topology are running, and given a single instance
// in such a topology, this function will detect the entire topology.
func StartDiscovery(instanceKey inst.InstanceKey) {
	log.Infof("Starting discovery at %+v", instanceKey)
	pendingTokens := make(chan bool, maxConcurrency)
	completedTokens := make(chan bool, maxConcurrency)

	AccountedDiscoverInstance(instanceKey, pendingTokens, completedTokens)
	go handleDiscoveryRequests(pendingTokens, completedTokens)

	// Block until all are complete
	for {
		select {
		case <-pendingTokens:
			<-completedTokens
		default:
			inst.AuditOperation("start-discovery", &instanceKey, "")
			return
		}
	}
}

// ContinuousDiscovery starts an asynchronous infinite discovery process where instances are
// periodically investigated and their status captured, and long since unseen instances are
// purged and forgotten.
func ContinuousDiscovery() {
	log.Infof("Starting continuous discovery")
	go handleDiscoveryRequests(nil, nil)

	tick := time.Tick(time.Duration(config.Config.DiscoveryPollSeconds) * time.Second)
	forgetUnseenTick := time.Tick(time.Hour)
	for _ = range tick {
		instanceKeys, _ := inst.ReadOutdatedInstanceKeys()
		log.Debugf("outdated keys: %+v", instanceKeys)
		for _, instanceKey := range instanceKeys {
			discoveryInstanceKeys <- instanceKey
		}
		// See if we should also forget instances (lower frequency)
		select {
		case <-forgetUnseenTick:
			inst.ForgetLongUnseenInstances()
		default:
		}
	}
}
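For orientation, the two entry points above differ in lifecycle: StartDiscovery crawls the topology reachable from a single key and then returns, while ContinuousDiscovery polls outdated instances forever. A minimal usage sketch follows, assuming the same inst and orchestrator imports as the tests above; the runDiscovery helper, the hostname/port values, and the discoverOnce flag are illustrative, not part of the code base.

// runDiscovery is a hypothetical caller contrasting one-shot and continuous discovery.
func runDiscovery(discoverOnce bool) {
	// Illustrative seed instance; any reachable member of the topology would do.
	seedKey := inst.InstanceKey{Hostname: "db-master.example.com", Port: 3306}
	if discoverOnce {
		// Crawls masters/slaves reachable from seedKey, then returns.
		orchestrator.StartDiscovery(seedKey)
		return
	}
	// Polls outdated instances every DiscoveryPollSeconds and never returns.
	orchestrator.ContinuousDiscovery()
}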
func (s *TestSuite) TestCluster(c *C) {
	inst.ReadInstance(&masterKey)
	orchestrator.StartDiscovery(slave1Key)
	instances, _ := inst.ReadClusterInstances(fmt.Sprintf("%s:%d", masterKey.Hostname, masterKey.Port))
	c.Assert(len(instances) >= 1, Equals, true)
}