// checkAndRecoverDeadCoMaster checks a given analysis, decides whether to take action, and possibly takes action
// Returns true when action was taken.
func checkAndRecoverDeadCoMaster(analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (bool, *TopologyRecovery, error) {
	failedInstanceKey := &analysisEntry.AnalyzedInstanceKey
	if !(forceInstanceRecovery || analysisEntry.ClusterDetails.HasAutomatedMasterRecovery) {
		return false, nil, nil
	}
	topologyRecovery, err := AttemptRecoveryRegistration(&analysisEntry, !forceInstanceRecovery, !forceInstanceRecovery)
	if topologyRecovery == nil {
		log.Debugf("topology_recovery: found an active or recent recovery on %+v. Will not issue another RecoverDeadCoMaster.", analysisEntry.AnalyzedInstanceKey)
		return false, nil, err
	}

	// That's it! We must do recovery!
	recoverDeadCoMasterCounter.Inc(1)
	coMaster, lostSlaves, err := RecoverDeadCoMaster(topologyRecovery, skipProcesses)
	ResolveRecovery(topologyRecovery, coMaster)
	if coMaster == nil {
		inst.AuditOperation("recover-dead-co-master", failedInstanceKey, "Failure: no slave promoted.")
	} else {
		inst.AuditOperation("recover-dead-co-master", failedInstanceKey, fmt.Sprintf("promoted co-master: %+v", coMaster.Key))
	}
	topologyRecovery.LostSlaves.AddInstances(lostSlaves)
	if coMaster != nil {
		// success
		recoverDeadCoMasterSuccessCounter.Inc(1)

		if config.Config.ApplyMySQLPromotionAfterMasterFailover {
			log.Debugf("topology_recovery: - RecoverDeadCoMaster: will apply MySQL changes to promoted master")
			inst.DetachSlaveOperation(&coMaster.Key)
			inst.SetReadOnly(&coMaster.Key, false)
		}
		if !skipProcesses {
			// Execute post master-failover processes
			topologyRecovery.SuccessorKey = &coMaster.Key
			executeProcesses(config.Config.PostMasterFailoverProcesses, "PostMasterFailoverProcesses", topologyRecovery, false)
		}
	} else {
		recoverDeadCoMasterFailureCounter.Inc(1)
	}

	return true, topologyRecovery, err
}
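// Illustrative only (not part of the original source): a hypothetical call site showing how the
// return values of checkAndRecoverDeadCoMaster are meant to be read. The boolean reports that a
// recovery was *attempted*; success is inferred from the returned TopologyRecovery.
func exampleCheckAndRecoverDeadCoMasterCallSite(analysisEntry inst.ReplicationAnalysis) {
	recoveryAttempted, topologyRecovery, err := checkAndRecoverDeadCoMaster(analysisEntry, nil, false, false)
	if err != nil {
		log.Errore(err)
	}
	if recoveryAttempted && topologyRecovery != nil && topologyRecovery.SuccessorKey != nil {
		log.Infof("promoted co-master: %+v", *topologyRecovery.SuccessorKey)
	}
}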
// RecoverDeadMaster attempts to recover from a dead master by promoting one of its slaves in its place.
func RecoverDeadMaster(analysisEntry inst.ReplicationAnalysis, skipProcesses bool) (promotedSlave *inst.Instance, lostSlaves []*inst.Instance, err error) {
	failedInstanceKey := &analysisEntry.AnalyzedInstanceKey
	if ok, err := AttemptRecoveryRegistration(&analysisEntry); !ok {
		log.Debugf("topology_recovery: found an active or recent recovery on %+v. Will not issue another RecoverDeadMaster.", *failedInstanceKey)
		return nil, lostSlaves, err
	}

	inst.AuditOperation("recover-dead-master", failedInstanceKey, "problem found; will recover")
	if !skipProcesses {
		if err := executeProcesses(config.Config.PreFailoverProcesses, "PreFailoverProcesses", analysisEntry, nil, emptySlavesList, true); err != nil {
			return nil, lostSlaves, err
		}
	}

	log.Debugf("topology_recovery: RecoverDeadMaster: will recover %+v", *failedInstanceKey)

	// Decide which recovery strategy applies, based on the immediate topology of the failed master.
	var masterRecoveryType MasterRecoveryType = MasterRecoveryPseudoGTID
	if (analysisEntry.OracleGTIDImmediateTopology || analysisEntry.MariaDBGTIDImmediateTopology) && !analysisEntry.PseudoGTIDImmediateTopology {
		masterRecoveryType = MasterRecoveryGTID
	} else if analysisEntry.BinlogServerImmediateTopology {
		masterRecoveryType = MasterRecoveryBinlogServer
	}
	log.Debugf("topology_recovery: RecoverDeadMaster: masterRecoveryType=%+v", masterRecoveryType)

	switch masterRecoveryType {
	case MasterRecoveryGTID:
		{
			lostSlaves, _, promotedSlave, err = inst.RegroupSlavesGTID(failedInstanceKey, true, nil)
		}
	case MasterRecoveryPseudoGTID:
		{
			lostSlaves, _, _, promotedSlave, err = inst.RegroupSlavesIncludingSubSlavesOfBinlogServers(failedInstanceKey, true, nil)
		}
	case MasterRecoveryBinlogServer:
		{
			promotedSlave, err = inst.RegroupSlavesBinlogServers(failedInstanceKey, true, nil)
		}
	}

	if promotedSlave != nil && len(lostSlaves) > 0 && config.Config.DetachLostSlavesAfterMasterFailover {
		log.Debugf("topology_recovery: - RecoverDeadMaster: lost %+v slaves during recovery process; detaching them", len(lostSlaves))
		go func() {
			for _, slave := range lostSlaves {
				slave := slave
				inst.DetachSlaveOperation(&slave.Key)
			}
		}()
	}
	if config.Config.MasterFailoverLostInstancesDowntimeMinutes > 0 {
		inst.BeginDowntime(failedInstanceKey, inst.GetMaintenanceOwner(), "RecoverDeadMaster indicates this instance is lost", config.Config.MasterFailoverLostInstancesDowntimeMinutes*60)
		for _, slave := range lostSlaves {
			slave := slave
			inst.BeginDowntime(&slave.Key, inst.GetMaintenanceOwner(), "RecoverDeadMaster indicates this instance is lost", config.Config.MasterFailoverLostInstancesDowntimeMinutes*60)
		}
	}

	if promotedSlave == nil {
		log.Debugf("topology_recovery: - RecoverDeadMaster: Failure: no slave promoted.")
		inst.AuditOperation("recover-dead-master", failedInstanceKey, "Failure: no slave promoted.")
	} else {
		log.Debugf("topology_recovery: - RecoverDeadMaster: promoted slave is %+v", promotedSlave.Key)
		inst.AuditOperation("recover-dead-master", failedInstanceKey, fmt.Sprintf("master: %+v", promotedSlave.Key))
	}
	return promotedSlave, lostSlaves, err
}
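// Illustrative only (not part of the original source): the recovery-type selection above is a pure
// function of the analysis flags, so it can be read (or unit-tested) in isolation. A minimal sketch,
// assuming one wanted such a helper; chooseMasterRecoveryType is hypothetical and mirrors the
// precedence used by RecoverDeadMaster: GTID when the immediate topology is Oracle/MariaDB GTID and
// not Pseudo-GTID, then binlog servers, with Pseudo-GTID as the fallback.
func chooseMasterRecoveryType(analysisEntry inst.ReplicationAnalysis) MasterRecoveryType {
	if (analysisEntry.OracleGTIDImmediateTopology || analysisEntry.MariaDBGTIDImmediateTopology) && !analysisEntry.PseudoGTIDImmediateTopology {
		return MasterRecoveryGTID
	}
	if analysisEntry.BinlogServerImmediateTopology {
		return MasterRecoveryBinlogServer
	}
	return MasterRecoveryPseudoGTID
}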
// Cli initiates a command line interface, executing requested command.
func Cli(command string, strict bool, instance string, destination string, owner string, reason string, duration string, pattern string, clusterAlias string, pool string, hostnameFlag string) {
	if instance != "" && !strings.Contains(instance, ":") {
		instance = fmt.Sprintf("%s:%d", instance, config.Config.DefaultInstancePort)
	}
	instanceKey, err := inst.ParseInstanceKey(instance)
	if err != nil {
		instanceKey = nil
	}
	rawInstanceKey, err := inst.NewRawInstanceKey(instance)
	if err != nil {
		rawInstanceKey = nil
	}
	if destination != "" && !strings.Contains(destination, ":") {
		destination = fmt.Sprintf("%s:%d", destination, config.Config.DefaultInstancePort)
	}
	destinationKey, err := inst.ParseInstanceKey(destination)
	if err != nil {
		destinationKey = nil
	}
	if hostname, err := os.Hostname(); err == nil {
		thisInstanceKey = &inst.InstanceKey{Hostname: hostname, Port: int(config.Config.DefaultInstancePort)}
	}
	postponedFunctionsContainer := inst.NewPostponedFunctionsContainer()

	if len(owner) == 0 {
		// get os username as owner
		usr, err := user.Current()
		if err != nil {
			log.Fatale(err)
		}
		owner = usr.Username
	}
	inst.SetMaintenanceOwner(owner)

	skipDatabaseCommands := false
	switch command {
	case "reset-internal-db-deployment":
		skipDatabaseCommands = true
	case "help":
		skipDatabaseCommands = true
	}
	if !skipDatabaseCommands {
		process.ContinuousRegistration(string(process.OrchestratorExecutionCliMode), command)
	}

	// begin commands
	switch command {
	// smart mode
	case registerCliCommand("relocate", "Smart relocation", `Relocate a slave beneath another instance`), registerCliCommand("relocate-below", "Smart relocation", `Synonym to 'relocate', will be deprecated`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			if destinationKey == nil {
				log.Fatal("Cannot deduce destination:", destination)
			}
			_, err := inst.RelocateBelow(instanceKey, destinationKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), destinationKey.DisplayString()))
		}
	case registerCliCommand("relocate-slaves", "Smart relocation", `Relocates all or part of the slaves of a given instance under another instance`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			if destinationKey == nil {
				log.Fatal("Cannot deduce destination:", destination)
			}
			slaves, _, err, errs := inst.RelocateSlaves(instanceKey, destinationKey, pattern)
			if err != nil {
				log.Fatale(err)
			} else {
				for _, e := range errs {
					log.Errore(e)
				}
				for _, slave := range slaves {
					fmt.Println(slave.Key.DisplayString())
				}
			}
		}
	case registerCliCommand("regroup-slaves", "Smart relocation", `Given an instance, pick one of its slaves and make it the local master of its siblings`):
		{
			if instanceKey == nil {
				log.Fatal("Cannot deduce instance:", instance)
			}
			lostSlaves, equalSlaves, aheadSlaves, promotedSlave, err := inst.RegroupSlaves(instanceKey, false, func(candidateSlave *inst.Instance) { fmt.Println(candidateSlave.Key.DisplayString()) }, postponedFunctionsContainer)
			postponedFunctionsContainer.InvokePostponed()
			if promotedSlave == nil {
				log.Fatalf("Could not regroup slaves of %+v; error: %+v", *instanceKey, err)
			}
			fmt.Println(fmt.Sprintf("%s lost: %d, trivial: %d, pseudo-gtid: %d", promotedSlave.Key.DisplayString(), len(lostSlaves), len(equalSlaves), len(aheadSlaves)))
			if err != nil {
				log.Fatale(err)
			}
		}
	// General replication commands
	// move, binlog file:pos
	case registerCliCommand("move-up", "Classic file:pos relocation", `Move a slave one level up the topology`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			instance, err := inst.MoveUp(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), instance.MasterKey.DisplayString()))
		}
	case registerCliCommand("move-up-slaves", "Classic file:pos relocation", `Moves slaves of the given instance one level up the topology`):
		{
			if instanceKey == nil {
				log.Fatal("Cannot deduce instance:", instance)
			}
			movedSlaves, _, err, errs := inst.MoveUpSlaves(instanceKey, pattern)
			if err != nil {
				log.Fatale(err)
			} else {
				for _, e := range errs {
					log.Errore(e)
				}
				for _, slave := range movedSlaves {
					fmt.Println(slave.Key.DisplayString())
				}
			}
		}
	case registerCliCommand("move-below", "Classic file:pos relocation", `Moves a slave beneath its sibling. Both slaves must be actively replicating from same master.`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			if destinationKey == nil {
				log.Fatal("Cannot deduce destination/sibling:", destination)
			}
			_, err := inst.MoveBelow(instanceKey, destinationKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), destinationKey.DisplayString()))
		}
	case registerCliCommand("move-equivalent", "Classic file:pos relocation", `Moves a slave beneath another server, based on previously recorded "equivalence coordinates"`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			if destinationKey == nil {
				log.Fatal("Cannot deduce destination:", destination)
			}
			_, err := inst.MoveEquivalent(instanceKey, destinationKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), destinationKey.DisplayString()))
		}
	case registerCliCommand("repoint", "Classic file:pos relocation", `Make the given instance replicate from another instance without changing the binlog coordinates. Use with care`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			// destinationKey can be null, in which case the instance repoints to its existing master
			instance, err := inst.Repoint(instanceKey, destinationKey, inst.GTIDHintNeutral)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), instance.MasterKey.DisplayString()))
		}
	case registerCliCommand("repoint-slaves", "Classic file:pos relocation", `Repoint all slaves of given instance to replicate back from the instance. Use with care`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			repointedSlaves, err, errs := inst.RepointSlavesTo(instanceKey, pattern, destinationKey)
			if err != nil {
				log.Fatale(err)
			} else {
				for _, e := range errs {
					log.Errore(e)
				}
				for _, slave := range repointedSlaves {
					fmt.Println(fmt.Sprintf("%s<%s", slave.Key.DisplayString(), instanceKey.DisplayString()))
				}
			}
		}
	case registerCliCommand("enslave-siblings", "Classic file:pos relocation", `Turn all siblings of a slave into its sub-slaves.`):
		{
			if instanceKey == nil {
				log.Fatal("Cannot deduce instance:", instance)
			}
			_, _, err := inst.EnslaveSiblings(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("enslave-master", "Classic file:pos relocation", `Turn an instance into a master of its own master; essentially switch the two.`):
		{
			if instanceKey == nil {
				log.Fatal("Cannot deduce instance:", instance)
			}
			_, err := inst.EnslaveMaster(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("make-co-master", "Classic file:pos relocation", `Create a master-master replication. Given instance is a slave which replicates directly from a master.`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			_, err := inst.MakeCoMaster(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("get-candidate-slave", "Classic file:pos relocation", `Information command suggesting the most up-to-date slave of a given instance that is good for promotion`):
		{
			if instanceKey == nil {
				log.Fatal("Cannot deduce instance:", instance)
			}
			instance, _, _, _, err := inst.GetCandidateSlave(instanceKey, false)
			if err != nil {
				log.Fatale(err)
			} else {
				fmt.Println(instance.Key.DisplayString())
			}
		}
	case registerCliCommand("regroup-slaves-bls", "Binlog server relocation", `Regroup Binlog Server slaves of a given instance`):
		{
			if instanceKey == nil {
				log.Fatal("Cannot deduce instance:", instance)
			}
			_, promotedBinlogServer, err := inst.RegroupSlavesBinlogServers(instanceKey, false)
			if promotedBinlogServer == nil {
				log.Fatalf("Could not regroup binlog server slaves of %+v; error: %+v", *instanceKey, err)
			}
			fmt.Println(promotedBinlogServer.Key.DisplayString())
			if err != nil {
				log.Fatale(err)
			}
		}
	// move, GTID
	case registerCliCommand("move-gtid", "GTID relocation", `Move a slave beneath another instance.`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			if destinationKey == nil {
				log.Fatal("Cannot deduce destination:", destination)
			}
			_, err := inst.MoveBelowGTID(instanceKey, destinationKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), destinationKey.DisplayString()))
		}
	case registerCliCommand("move-slaves-gtid", "GTID relocation", `Moves all slaves of a given instance under another (destination) instance using GTID`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			if destinationKey == nil {
				log.Fatal("Cannot deduce destination:", destination)
			}
			movedSlaves, _, err, errs := inst.MoveSlavesGTID(instanceKey, destinationKey, pattern)
			if err != nil {
				log.Fatale(err)
			} else {
				for _, e := range errs {
					log.Errore(e)
				}
				for _, slave := range movedSlaves {
					fmt.Println(slave.Key.DisplayString())
				}
			}
		}
	case registerCliCommand("regroup-slaves-gtid", "GTID relocation", `Given an instance, pick one of its slaves and make it the local master of its siblings, using GTID.`):
		{
			if instanceKey == nil {
				log.Fatal("Cannot deduce instance:", instance)
			}
			lostSlaves, movedSlaves, promotedSlave, err := inst.RegroupSlavesGTID(instanceKey, false, func(candidateSlave *inst.Instance) { fmt.Println(candidateSlave.Key.DisplayString()) })
			if promotedSlave == nil {
				log.Fatalf("Could not regroup slaves of %+v; error: %+v", *instanceKey, err)
			}
			fmt.Println(fmt.Sprintf("%s lost: %d, moved: %d", promotedSlave.Key.DisplayString(), len(lostSlaves), len(movedSlaves)))
			if err != nil {
				log.Fatale(err)
			}
		}
	// Pseudo-GTID
	case registerCliCommand("match", "Pseudo-GTID relocation", `Matches a slave beneath another (destination) instance using Pseudo-GTID`), registerCliCommand("match-below", "Pseudo-GTID relocation", `Synonym to 'match', will be deprecated`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			if destinationKey == nil {
				log.Fatal("Cannot deduce destination:", destination)
			}
			_, _, err := inst.MatchBelow(instanceKey, destinationKey, true)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), destinationKey.DisplayString()))
		}
	case registerCliCommand("match-up", "Pseudo-GTID relocation", `Transport the slave one level up the hierarchy, making it child of its grandparent, using Pseudo-GTID`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			instance, _, err := inst.MatchUp(instanceKey, true)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), instance.MasterKey.DisplayString()))
		}
	case registerCliCommand("rematch", "Pseudo-GTID relocation", `Reconnect a slave onto its master, via Pseudo-GTID.`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			instance, _, err := inst.RematchSlave(instanceKey, true)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), instance.MasterKey.DisplayString()))
		}
	case registerCliCommand("match-slaves", "Pseudo-GTID relocation", `Matches all slaves of a given instance under another (destination) instance using Pseudo-GTID`), registerCliCommand("multi-match-slaves", "Pseudo-GTID relocation", `Synonym to 'match-slaves', will be deprecated`):
		{
			// Move all slaves of "instance" beneath "destination"
			if instanceKey == nil {
				log.Fatal("Cannot deduce instance:", instance)
			}
			if destinationKey == nil {
				log.Fatal("Cannot deduce destination:", destination)
			}
			matchedSlaves, _, err, errs := inst.MultiMatchSlaves(instanceKey, destinationKey, pattern)
			if err != nil {
				log.Fatale(err)
			} else {
				for _, e := range errs {
					log.Errore(e)
				}
				for _, slave := range matchedSlaves {
					fmt.Println(slave.Key.DisplayString())
				}
			}
		}
	case registerCliCommand("match-up-slaves", "Pseudo-GTID relocation", `Matches slaves of the given instance one level up the topology, making them siblings of given instance, using Pseudo-GTID`):
		{
			if instanceKey == nil {
				log.Fatal("Cannot deduce instance:", instance)
			}
			matchedSlaves, _, err, errs := inst.MatchUpSlaves(instanceKey, pattern)
			if err != nil {
				log.Fatale(err)
			} else {
				for _, e := range errs {
					log.Errore(e)
				}
				for _, slave := range matchedSlaves {
					fmt.Println(slave.Key.DisplayString())
				}
			}
		}
	case registerCliCommand("regroup-slaves-pgtid", "Pseudo-GTID relocation", `Given an instance, pick one of its slaves and make it the local master of its siblings, using Pseudo-GTID.`):
		{
			if instanceKey == nil {
				log.Fatal("Cannot deduce instance:", instance)
			}
			lostSlaves, equalSlaves, aheadSlaves, promotedSlave, err := inst.RegroupSlavesPseudoGTID(instanceKey, false, func(candidateSlave *inst.Instance) { fmt.Println(candidateSlave.Key.DisplayString()) }, postponedFunctionsContainer)
			postponedFunctionsContainer.InvokePostponed()
			if promotedSlave == nil {
				log.Fatalf("Could not regroup slaves of %+v; error: %+v", *instanceKey, err)
			}
			fmt.Println(fmt.Sprintf("%s lost: %d, trivial: %d, pseudo-gtid: %d", promotedSlave.Key.DisplayString(), len(lostSlaves), len(equalSlaves), len(aheadSlaves)))
			if err != nil {
				log.Fatale(err)
			}
		}
	// General replication commands
	case registerCliCommand("enable-gtid", "Replication, general", `If possible, turn on GTID replication`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			_, err := inst.EnableGTID(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("disable-gtid", "Replication, general", `Turn off GTID replication, back to file:pos replication`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			_, err := inst.DisableGTID(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("reset-master-gtid-remove-own-uuid", "Replication, general", `Reset master on instance, remove GTID entries generated by instance`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			_, err := inst.ResetMasterGTIDOperation(instanceKey, true, "")
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("skip-query", "Replication, general", `Skip a single statement on a slave; either when running with GTID or without`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			_, err := inst.SkipQuery(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("stop-slave", "Replication, general", `Issue a STOP SLAVE on an instance`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			_, err := inst.StopSlave(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("start-slave", "Replication, general", `Issue a START SLAVE on an instance`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			_, err := inst.StartSlave(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("restart-slave", "Replication, general", `STOP and START SLAVE on an instance`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			_, err := inst.RestartSlave(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("reset-slave", "Replication, general", `Issues a RESET SLAVE command; use with care`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			_, err := inst.ResetSlaveOperation(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("detach-slave", "Replication, general", `Stops replication and modifies binlog position into an impossible, yet reversible, value.`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			_, err := inst.DetachSlaveOperation(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("reattach-slave", "Replication, general", `Undo a detach-slave operation`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			_, err := inst.ReattachSlaveOperation(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("detach-slave-master-host", "Replication, general", `Stops replication and modifies Master_Host into an impossible, yet reversible, value.`):
		{
			if instanceKey == nil {
				instanceKey = assignThisInstanceKey()
			}
			if instanceKey == nil {
				log.Fatal("Cannot deduce instance:", instance)
			}
			_, err := inst.DetachSlaveMasterHost(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("reattach-slave-master-host", "Replication, general", `Undo a detach-slave-master-host operation`):
		{
			if instanceKey == nil {
				instanceKey = assignThisInstanceKey()
			}
			if instanceKey == nil {
				log.Fatal("Cannot deduce instance:", instance)
			}
			_, err := inst.ReattachSlaveMasterHost(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("master-pos-wait", "Replication, general", `Wait until slave reaches given replication coordinates (--binlog=file:pos)`):
		{
			if instanceKey == nil {
				instanceKey = assignThisInstanceKey()
			}
			if instanceKey == nil {
				log.Fatalf("Unresolved instance")
			}
			instance, err := inst.ReadTopologyInstance(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			if instance == nil {
				log.Fatalf("Instance not found: %+v", *instanceKey)
			}
			var binlogCoordinates *inst.BinlogCoordinates
			if binlogCoordinates, err = inst.ParseBinlogCoordinates(*config.RuntimeCLIFlags.BinlogFile); err != nil {
				log.Fatalf("Expecting --binlog argument as file:pos")
			}
			_, err = inst.MasterPosWait(instanceKey, binlogCoordinates)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	// Instance
	case registerCliCommand("set-read-only", "Instance", `Turn an instance read-only, via SET GLOBAL read_only := 1`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			_, err := inst.SetReadOnly(instanceKey, true)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("set-writeable", "Instance", `Turn an instance writeable, via SET GLOBAL read_only := 0`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			_, err := inst.SetReadOnly(instanceKey, false)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	// Binary log operations
	case registerCliCommand("flush-binary-logs", "Binary logs", `Flush binary logs on an instance`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			var err error
			if *config.RuntimeCLIFlags.BinlogFile == "" {
				_, err = inst.FlushBinaryLogs(instanceKey, 1)
			} else {
				_, err = inst.FlushBinaryLogsTo(instanceKey, *config.RuntimeCLIFlags.BinlogFile)
			}
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("purge-binary-logs", "Binary logs", `Purge binary logs of an instance`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			var err error
			if *config.RuntimeCLIFlags.BinlogFile == "" {
				log.Fatal("expecting --binlog value")
			}
			_, err = inst.PurgeBinaryLogsTo(instanceKey, *config.RuntimeCLIFlags.BinlogFile)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("last-pseudo-gtid", "Binary logs", `Find latest Pseudo-GTID entry in instance's binary logs`):
		{
			if instanceKey == nil {
				instanceKey = assignThisInstanceKey()
			}
			if instanceKey == nil {
				log.Fatalf("Unresolved instance")
			}
			instance, err := inst.ReadTopologyInstance(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			if instance == nil {
				log.Fatalf("Instance not found: %+v", *instanceKey)
			}
			coordinates, text, err := inst.FindLastPseudoGTIDEntry(instance, instance.RelaylogCoordinates, nil, strict, nil)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(fmt.Sprintf("%+v:%s", *coordinates, text))
		}
	case registerCliCommand("find-binlog-entry", "Binary logs", `Get binlog file:pos of entry given by --pattern (exact full match, not a regular expression) in a given instance`):
		{
			if pattern == "" {
				log.Fatal("No pattern given")
			}
			if instanceKey == nil {
				instanceKey = assignThisInstanceKey()
			}
			if instanceKey == nil {
				log.Fatalf("Unresolved instance")
			}
			instance, err := inst.ReadTopologyInstance(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			if instance == nil {
				log.Fatalf("Instance not found: %+v", *instanceKey)
			}
			coordinates, err := inst.SearchEntryInInstanceBinlogs(instance, pattern, false)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(fmt.Sprintf("%+v", *coordinates))
		}
	case registerCliCommand("correlate-binlog-pos", "Binary logs", `Given an instance (-i) and binlog coordinates (--binlog=file:pos), find the correlated coordinates in another instance (-d)`):
		{
			if instanceKey == nil {
				instanceKey = assignThisInstanceKey()
			}
			if instanceKey == nil {
				log.Fatalf("Unresolved instance")
			}
			instance, err := inst.ReadTopologyInstance(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			if instance == nil {
				log.Fatalf("Instance not found: %+v", *instanceKey)
			}
			if !instance.LogBinEnabled {
				log.Fatalf("Instance does not have binary logs: %+v", *instanceKey)
			}
			if destinationKey == nil {
				log.Fatal("Cannot deduce target instance:", destination)
			}
			otherInstance, err := inst.ReadTopologyInstance(destinationKey)
			if err != nil {
				log.Fatale(err)
			}
			if otherInstance == nil {
				log.Fatalf("Instance not found: %+v", *destinationKey)
			}
			var binlogCoordinates *inst.BinlogCoordinates
			if *config.RuntimeCLIFlags.BinlogFile == "" {
				binlogCoordinates = &instance.SelfBinlogCoordinates
			} else {
				if binlogCoordinates, err = inst.ParseBinlogCoordinates(*config.RuntimeCLIFlags.BinlogFile); err != nil {
					log.Fatalf("Expecting --binlog argument as file:pos")
				}
			}
			coordinates, _, err := inst.CorrelateBinlogCoordinates(instance, binlogCoordinates, otherInstance)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(fmt.Sprintf("%+v", *coordinates))
		}
	// Pool
	case registerCliCommand("submit-pool-instances", "Pools", `Submit a pool name with a list of instances in that pool`):
		{
			if pool == "" {
				log.Fatal("Please submit --pool")
			}
			err := inst.ApplyPoolInstances(pool, instance)
			if err != nil {
				log.Fatale(err)
			}
		}
	case registerCliCommand("cluster-pool-instances", "Pools", `List all pools and their associated instances`):
		{
			clusterPoolInstances, err := inst.ReadAllClusterPoolInstances()
			if err != nil {
				log.Fatale(err)
			}
			for _, clusterPoolInstance := range clusterPoolInstances {
				fmt.Println(fmt.Sprintf("%s\t%s\t%s\t%s:%d", clusterPoolInstance.ClusterName, clusterPoolInstance.ClusterAlias, clusterPoolInstance.Pool, clusterPoolInstance.Hostname, clusterPoolInstance.Port))
			}
		}
	// Information
	case registerCliCommand("find", "Information", `Find instances whose hostname matches given regex pattern`):
		{
			if pattern == "" {
				log.Fatal("No pattern given")
			}
			instances, err := inst.FindInstances(pattern)
			if err != nil {
				log.Fatale(err)
			} else {
				for _, instance := range instances {
					fmt.Println(instance.Key.DisplayString())
				}
			}
		}
	case registerCliCommand("clusters", "Information", `List all clusters known to orchestrator`):
		{
			clusters, err := inst.ReadClusters()
			if err != nil {
				log.Fatale(err)
			} else {
				fmt.Println(strings.Join(clusters, "\n"))
			}
		}
	case registerCliCommand("topology", "Information", `Show an ascii-graph of a replication topology, given a member of that topology`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			output, err := inst.ASCIITopology(instanceKey, pattern)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(output)
		}
	case registerCliCommand("which-instance", "Information", `Output the fully-qualified hostname:port representation of the given instance, or error if unknown`):
		{
			if instanceKey == nil {
				instanceKey = assignThisInstanceKey()
			}
			if instanceKey == nil {
				log.Fatalf("Unable to get master: unresolved instance")
			}
			instance, _, err := inst.ReadInstance(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			if instance == nil {
				log.Fatalf("Instance not found: %+v", *instanceKey)
			}
			fmt.Println(instance.Key.DisplayString())
		}
	case registerCliCommand("which-cluster", "Information", `Output the name of the cluster an instance belongs to, or error if unknown to orchestrator`):
		{
			clusterName := getClusterName(clusterAlias, instanceKey)
			fmt.Println(clusterName)
		}
	case registerCliCommand("which-cluster-instances", "Information", `Output the list of instances participating in same cluster as given instance`):
		{
			clusterName := getClusterName(clusterAlias, instanceKey)
			instances, err := inst.ReadClusterInstances(clusterName)
			if err != nil {
				log.Fatale(err)
			}
			for _, clusterInstance := range instances {
				fmt.Println(clusterInstance.Key.DisplayString())
			}
		}
	case registerCliCommand("which-cluster-osc-slaves", "Information", `Output a list of slaves in same cluster as given instance, that could serve as a pt-online-schema-change operation control slaves`):
		{
			clusterName := getClusterName(clusterAlias, instanceKey)
			instances, err := inst.GetClusterOSCSlaves(clusterName)
			if err != nil {
				log.Fatale(err)
			}
			for _, clusterInstance := range instances {
				fmt.Println(clusterInstance.Key.DisplayString())
			}
		}
	case registerCliCommand("which-master", "Information", `Output the fully-qualified hostname:port representation of a given instance's master`):
		{
			if instanceKey == nil {
				instanceKey = assignThisInstanceKey()
			}
			if instanceKey == nil {
				log.Fatalf("Unable to get master: unresolved instance")
			}
			instance, _, err := inst.ReadInstance(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			if instance == nil {
				log.Fatalf("Instance not found: %+v", *instanceKey)
			}
			fmt.Println(instance.MasterKey.DisplayString())
		}
	case registerCliCommand("which-slaves", "Information", `Output the fully-qualified hostname:port list of slaves of a given instance`):
		{
			if instanceKey == nil {
				instanceKey = assignThisInstanceKey()
			}
			if instanceKey == nil {
				log.Fatalf("Unable to get slaves: unresolved instance")
			}
			slaves, err := inst.ReadSlaveInstances(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			for _, slave := range slaves {
				fmt.Println(slave.Key.DisplayString())
			}
		}
	case registerCliCommand("instance-status", "Information", `Output short status on a given instance`):
		{
			if instanceKey == nil {
				instanceKey = assignThisInstanceKey()
			}
			if instanceKey == nil {
				log.Fatalf("Unable to get status: unresolved instance")
			}
			instance, _, err := inst.ReadInstance(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			if instance == nil {
				log.Fatalf("Instance not found: %+v", *instanceKey)
			}
			fmt.Println(instance.HumanReadableDescription())
		}
	case registerCliCommand("get-cluster-heuristic-lag", "Information", `For a given cluster (indicated by an instance or alias), output a heuristic "representative" lag of that cluster`):
		{
			clusterName := getClusterName(clusterAlias, instanceKey)
			lag, err := inst.GetClusterHeuristicLag(clusterName)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(lag)
		}
	// Instance management
	case registerCliCommand("discover", "Instance management", `Lookup an instance, investigate it`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			instance, err := inst.ReadTopologyInstance(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instance.Key.DisplayString())
		}
	case registerCliCommand("forget", "Instance management", `Forget about an instance's existence`):
		{
			if rawInstanceKey == nil {
				rawInstanceKey = assignThisInstanceKey()
			}
			if rawInstanceKey == nil {
				log.Fatal("Cannot deduce instance:", instance)
			}
			err := inst.ForgetInstance(rawInstanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(rawInstanceKey.DisplayString())
		}
	case registerCliCommand("begin-maintenance", "Instance management", `Request a maintenance lock on an instance`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			if reason == "" {
				log.Fatal("--reason option required")
			}
			var durationSeconds int = 0
			if duration != "" {
				durationSeconds, err = util.SimpleTimeToSeconds(duration)
				if err != nil {
					log.Fatale(err)
				}
				if durationSeconds < 0 {
					log.Fatalf("Duration value must be non-negative. Given value: %d", durationSeconds)
				}
			}
			maintenanceKey, err := inst.BeginBoundedMaintenance(instanceKey, inst.GetMaintenanceOwner(), reason, uint(durationSeconds))
			if err == nil {
				log.Infof("Maintenance key: %+v", maintenanceKey)
				log.Infof("Maintenance duration: %d seconds", durationSeconds)
			}
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("end-maintenance", "Instance management", `Remove maintenance lock from an instance`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			err := inst.EndMaintenanceByInstanceKey(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("begin-downtime", "Instance management", `Mark an instance as downtimed`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			if reason == "" {
				log.Fatal("--reason option required")
			}
			var durationSeconds int = 0
			if duration != "" {
				durationSeconds, err = util.SimpleTimeToSeconds(duration)
				if err != nil {
					log.Fatale(err)
				}
				if durationSeconds < 0 {
					log.Fatalf("Duration value must be non-negative. Given value: %d", durationSeconds)
				}
			}
			err := inst.BeginDowntime(instanceKey, inst.GetMaintenanceOwner(), reason, uint(durationSeconds))
			if err == nil {
				log.Infof("Downtime duration: %d seconds", durationSeconds)
			} else {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("end-downtime", "Instance management", `Indicate an instance is no longer downtimed`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			err := inst.EndDowntime(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	// Recovery & analysis
	case registerCliCommand("recover", "Recovery", `Do auto-recovery given a dead instance`), registerCliCommand("recover-lite", "Recovery", `Do auto-recovery given a dead instance. Orchestrator chooses the best course of action without executing external processes`):
		{
			if instanceKey == nil {
				log.Fatal("Cannot deduce instance:", instance)
			}
			recoveryAttempted, promotedInstanceKey, err := logic.CheckAndRecover(instanceKey, destinationKey, (command == "recover-lite"))
			if err != nil {
				log.Fatale(err)
			}
			if recoveryAttempted {
				if promotedInstanceKey == nil {
					log.Fatalf("Recovery attempted yet no slave promoted")
				}
				fmt.Println(promotedInstanceKey.DisplayString())
			}
		}
	case registerCliCommand("replication-analysis", "Recovery", `Request an analysis of potential crash incidents in all known topologies`):
		{
			analysis, err := inst.GetReplicationAnalysis("", false, false)
			if err != nil {
				log.Fatale(err)
			}
			for _, entry := range analysis {
				fmt.Println(fmt.Sprintf("%s (cluster %s): %s", entry.AnalyzedInstanceKey.DisplayString(), entry.ClusterDetails.ClusterName, entry.Analysis))
			}
		}
	case registerCliCommand("ack-cluster-recoveries", "Recovery", `Acknowledge recoveries for a given cluster; this unblocks pending future recoveries`):
		{
			if reason == "" {
				log.Fatal("--reason option required (comment your ack)")
			}
			clusterName := getClusterName(clusterAlias, instanceKey)
			countRecoveries, err := logic.AcknowledgeClusterRecoveries(clusterName, inst.GetMaintenanceOwner(), reason)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(fmt.Sprintf("%d recoveries acknowledged", countRecoveries))
		}
	case registerCliCommand("ack-instance-recoveries", "Recovery", `Acknowledge recoveries for a given instance; this unblocks pending future recoveries`):
		{
			if reason == "" {
				log.Fatal("--reason option required (comment your ack)")
			}
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			countRecoveries, err := logic.AcknowledgeInstanceRecoveries(instanceKey, inst.GetMaintenanceOwner(), reason)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(fmt.Sprintf("%d recoveries acknowledged", countRecoveries))
		}
	// Instance meta
	case registerCliCommand("register-candidate", "Instance, meta", `Indicate that a specific instance is a preferred candidate for master promotion`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			err := inst.RegisterCandidateInstance(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("register-hostname-unresolve", "Instance, meta", `Assigns the given instance a virtual (aka "unresolved") name`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			err := inst.RegisterHostnameUnresolve(instanceKey, hostnameFlag)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	case registerCliCommand("deregister-hostname-unresolve", "Instance, meta", `Explicitly deregister/disassociate a hostname with an "unresolved" name`):
		{
			instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
			err := inst.DeregisterHostnameUnresolve(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(instanceKey.DisplayString())
		}
	// meta
	case registerCliCommand("snapshot-topologies", "Meta", `Take a snapshot of existing topologies.`):
		{
			err := inst.SnapshotTopologies()
			if err != nil {
				log.Fatale(err)
			}
		}
	case registerCliCommand("continuous", "Meta", `Enter continuous mode, and actively poll for instances, diagnose problems, do maintenance`):
		{
			logic.ContinuousDiscovery()
		}
	case registerCliCommand("resolve", "Meta", `Resolve given hostname`):
		{
			if rawInstanceKey == nil {
				log.Fatal("Cannot deduce instance:", instance)
			}
			if conn, err := net.Dial("tcp", rawInstanceKey.DisplayString()); err == nil {
				log.Debugf("tcp test is good; got connection %+v", conn)
				conn.Close()
			} else {
				log.Fatale(err)
			}
			if cname, err := inst.GetCNAME(rawInstanceKey.Hostname); err == nil {
				log.Debugf("GetCNAME() %+v, %+v", cname, err)
				rawInstanceKey.Hostname = cname
				fmt.Println(rawInstanceKey.DisplayString())
			} else {
				log.Fatale(err)
			}
		}
	case registerCliCommand("reset-hostname-resolve-cache", "Meta", `Clear the hostname resolve cache`):
		{
			err := inst.ResetHostnameResolveCache()
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println("hostname resolve cache cleared")
		}
	case registerCliCommand("reset-internal-db-deployment", "Meta, internal", `Clear internal db deployment history, use if somehow corrupted internal deployment history`):
		{
			config.Config.SkipOrchestratorDatabaseUpdate = true
			db.ResetInternalDeployment()
			fmt.Println("Internal db deployment history reset. Next orchestrator execution will rebuild internal db structure (no data will be lost)")
		}
	// Help
	case "help":
		{
			fmt.Fprintf(os.Stderr, availableCommandsUsage())
		}
	default:
		log.Fatalf("Unknown command: \"%s\". %s", command, availableCommandsUsage())
	}
}
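// Illustrative only (not part of the original source): most command handlers above call
// deduceInstanceKeyIfNeeded. Judging by the handlers that still inline the fallback (e.g.
// "detach-slave-master-host"), it presumably defaults to this host's own instance key and aborts
// when no instance can be resolved. A minimal sketch under that assumption; the name carries a
// "Sketch" suffix to make clear it is not the real helper.
func deduceInstanceKeyIfNeededSketch(instance string, instanceKey *inst.InstanceKey) *inst.InstanceKey {
	if instanceKey == nil {
		instanceKey = assignThisInstanceKey()
	}
	if instanceKey == nil {
		log.Fatal("Cannot deduce instance:", instance)
	}
	return instanceKey
}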
// Cli initiates a command line interface, executing requested command. func Cli(command string, strict bool, instance string, destination string, owner string, reason string, duration string, pattern string, clusterAlias string, pool string, hostnameFlag string) { if instance != "" && !strings.Contains(instance, ":") { instance = fmt.Sprintf("%s:%d", instance, config.Config.DefaultInstancePort) } instanceKey, err := inst.ParseInstanceKey(instance) if err != nil { instanceKey = nil } rawInstanceKey, err := inst.NewRawInstanceKey(instance) if err != nil { rawInstanceKey = nil } if destination != "" && !strings.Contains(destination, ":") { destination = fmt.Sprintf("%s:%d", destination, config.Config.DefaultInstancePort) } destinationKey, err := inst.ParseInstanceKey(destination) if err != nil { destinationKey = nil } if hostname, err := os.Hostname(); err == nil { thisInstanceKey = &inst.InstanceKey{Hostname: hostname, Port: int(config.Config.DefaultInstancePort)} } if len(owner) == 0 { // get os username as owner usr, err := user.Current() if err != nil { log.Fatale(err) } owner = usr.Username } inst.SetMaintenanceOwner(owner) // begin commands switch command { // Instance meta case cliCommand("discover"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } instance, err := inst.ReadTopologyInstance(instanceKey) if err != nil { log.Fatale(err) } fmt.Println(instance.Key.DisplayString()) } case cliCommand("forget"): { if rawInstanceKey == nil { rawInstanceKey = thisInstanceKey } if rawInstanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } err := inst.ForgetInstance(rawInstanceKey) if err != nil { log.Fatale(err) } fmt.Println(rawInstanceKey.DisplayString()) } case cliCommand("resolve"): { if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } if conn, err := net.Dial("tcp", instanceKey.DisplayString()); err == nil { conn.Close() } else { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("register-hostname-unresolve"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } err := inst.RegisterHostnameUnresolve(instanceKey, hostnameFlag) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("deregister-hostname-unresolve"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } err := inst.DeregisterHostnameUnresolve(instanceKey) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("register-candidate"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } err := inst.RegisterCandidateInstance(instanceKey) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } // Instance case cliCommand("begin-maintenance"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } if reason == "" { log.Fatal("--reason option required") } var durationSeconds int = 0 if duration != "" { durationSeconds, err = util.SimpleTimeToSeconds(duration) if err != nil { log.Fatale(err) } if durationSeconds < 0 { log.Fatalf("Duration value must be non-negative. 
Given value: %d", durationSeconds) } } maintenanceKey, err := inst.BeginBoundedMaintenance(instanceKey, inst.GetMaintenanceOwner(), reason, uint(durationSeconds)) if err == nil { log.Infof("Maintenance key: %+v", maintenanceKey) log.Infof("Maintenance duration: %d seconds", durationSeconds) } if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("end-maintenance"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } err := inst.EndMaintenanceByInstanceKey(instanceKey) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("begin-downtime"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } if reason == "" { log.Fatal("--reason option required") } var durationSeconds int = 0 if duration != "" { durationSeconds, err = util.SimpleTimeToSeconds(duration) if err != nil { log.Fatale(err) } if durationSeconds < 0 { log.Fatalf("Duration value must be non-negative. Given value: %d", durationSeconds) } } err := inst.BeginDowntime(instanceKey, inst.GetMaintenanceOwner(), reason, uint(durationSeconds)) if err == nil { log.Infof("Downtime duration: %d seconds", durationSeconds) } else { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("end-downtime"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } err := inst.EndDowntime(instanceKey) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("set-read-only"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } _, err := inst.SetReadOnly(instanceKey, true) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("set-writeable"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } _, err := inst.SetReadOnly(instanceKey, false) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("flush-binary-logs"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } var err error if *config.RuntimeCLIFlags.BinlogFile == "" { err = inst.FlushBinaryLogs(instanceKey, 1) } else { _, err = inst.FlushBinaryLogsTo(instanceKey, *config.RuntimeCLIFlags.BinlogFile) } if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("last-pseudo-gtid"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatalf("Unresolved instance") } instance, err := inst.ReadTopologyInstance(instanceKey) if err != nil { log.Fatale(err) } if instance == nil { log.Fatalf("Instance not found: %+v", *instanceKey) } coordinates, text, err := inst.FindLastPseudoGTIDEntry(instance, instance.RelaylogCoordinates, strict, nil) if err != nil { log.Fatale(err) } fmt.Println(fmt.Sprintf("%+v:%s", *coordinates, text)) } // replication case cliCommand("stop-slave"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } _, err := inst.StopSlave(instanceKey) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("start-slave"): { if instanceKey == nil { 
instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } _, err := inst.StartSlave(instanceKey) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("restart-slave"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } _, err := inst.RestartSlave(instanceKey) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("reset-slave"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } _, err := inst.ResetSlaveOperation(instanceKey) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("detach-slave"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } _, err := inst.DetachSlaveOperation(instanceKey) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("reattach-slave"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } _, err := inst.ReattachSlaveOperation(instanceKey) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("enable-gtid"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } _, err := inst.EnableGTID(instanceKey) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("disable-gtid"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } _, err := inst.DisableGTID(instanceKey) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("skip-query"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } _, err := inst.SkipQuery(instanceKey) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } // move case cliCommand("relocate"), cliCommand("relocate-below"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } if destinationKey == nil { log.Fatal("Cannot deduce destination:", destination) } _, err := inst.RelocateBelow(instanceKey, destinationKey) if err != nil { log.Fatale(err) } fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), destinationKey.DisplayString())) } case cliCommand("relocate-slaves"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } if destinationKey == nil { log.Fatal("Cannot deduce destination:", destination) } slaves, _, err, errs := inst.RelocateSlaves(instanceKey, destinationKey, pattern) if err != nil { log.Fatale(err) } else { for _, e := range errs { log.Errore(e) } for _, slave := range slaves { fmt.Println(slave.Key.DisplayString()) } } } case cliCommand("move-up"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } instance, err := inst.MoveUp(instanceKey) if err != nil { log.Fatale(err) } fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), instance.MasterKey.DisplayString())) } case cliCommand("move-up-slaves"): { if 
instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } movedSlaves, _, err, errs := inst.MoveUpSlaves(instanceKey, pattern) if err != nil { log.Fatale(err) } else { for _, e := range errs { log.Errore(e) } for _, slave := range movedSlaves { fmt.Println(slave.Key.DisplayString()) } } } case cliCommand("move-below"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } if destinationKey == nil { log.Fatal("Cannot deduce sibling:", destination) } _, err := inst.MoveBelow(instanceKey, destinationKey) if err != nil { log.Fatale(err) } fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), destinationKey.DisplayString())) } case cliCommand("move-equivalent"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } if destinationKey == nil { log.Fatal("Cannot deduce sibling:", destination) } _, err := inst.MoveEquivalent(instanceKey, destinationKey) if err != nil { log.Fatale(err) } fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), destinationKey.DisplayString())) } case cliCommand("move-gtid"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } if destinationKey == nil { log.Fatal("Cannot deduce sibling:", destination) } _, err := inst.MoveBelowGTID(instanceKey, destinationKey) if err != nil { log.Fatale(err) } fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), destinationKey.DisplayString())) } case cliCommand("move-slaves-gtid"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } if destinationKey == nil { log.Fatal("Cannot deduce destination:", destination) } movedSlaves, _, err, errs := inst.MoveSlavesGTID(instanceKey, destinationKey, pattern) if err != nil { log.Fatale(err) } else { for _, e := range errs { log.Errore(e) } for _, slave := range movedSlaves { fmt.Println(slave.Key.DisplayString()) } } } case cliCommand("repoint"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } // destinationKey can be null, in which case the instance repoints to its existing master instance, err := inst.Repoint(instanceKey, destinationKey, inst.GTIDHintNeutral) if err != nil { log.Fatale(err) } fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), instance.MasterKey.DisplayString())) } case cliCommand("repoint-slaves"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } repointedSlaves, err, errs := inst.RepointSlavesTo(instanceKey, pattern, destinationKey) if err != nil { log.Fatale(err) } else { for _, e := range errs { log.Errore(e) } for _, slave := range repointedSlaves { fmt.Println(fmt.Sprintf("%s<%s", slave.Key.DisplayString(), instanceKey.DisplayString())) } } } case cliCommand("enslave-siblings"): { if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } _, _, err := inst.EnslaveSiblings(instanceKey) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("enslave-master"): { if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } _, err := inst.EnslaveMaster(instanceKey) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } case cliCommand("make-co-master"): { if instanceKey == nil { 
instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } _, err := inst.MakeCoMaster(instanceKey) if err != nil { log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } // Pseudo-GTID case cliCommand("match"), cliCommand("match-below"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } if destinationKey == nil { log.Fatal("Cannot deduce destination:", destination) } _, _, err := inst.MatchBelow(instanceKey, destinationKey, true) if err != nil { log.Fatale(err) } fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), destinationKey.DisplayString())) } case cliCommand("match-up"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } instance, _, err := inst.MatchUp(instanceKey, true) if err != nil { log.Fatale(err) } fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), instance.MasterKey.DisplayString())) } case cliCommand("rematch"): { if instanceKey == nil { instanceKey = thisInstanceKey } if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } instance, _, err := inst.RematchSlave(instanceKey, true) if err != nil { log.Fatale(err) } fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), instance.MasterKey.DisplayString())) } case cliCommand("get-candidate-slave"): { if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } instance, _, _, _, err := inst.GetCandidateSlave(instanceKey, false) if err != nil { log.Fatale(err) } else { fmt.Println(instance.Key.DisplayString()) } } case cliCommand("match-slaves"), cliCommand("multi-match-slaves"): { // Move all slaves of "instance" beneath "destination" if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } if destinationKey == nil { log.Fatal("Cannot deduce destination:", destination) } matchedSlaves, _, err, errs := inst.MultiMatchSlaves(instanceKey, destinationKey, pattern) if err != nil { log.Fatale(err) } else { for _, e := range errs { log.Errore(e) } for _, slave := range matchedSlaves { fmt.Println(slave.Key.DisplayString()) } } } case cliCommand("match-up-slaves"): { if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } matchedSlaves, _, err, errs := inst.MatchUpSlaves(instanceKey, pattern) if err != nil { log.Fatale(err) } else { for _, e := range errs { log.Errore(e) } for _, slave := range matchedSlaves { fmt.Println(slave.Key.DisplayString()) } } } case cliCommand("regroup-slaves"): { if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } lostSlaves, equalSlaves, aheadSlaves, promotedSlave, err := inst.RegroupSlaves(instanceKey, false, func(candidateSlave *inst.Instance) { fmt.Println(candidateSlave.Key.DisplayString()) }) if promotedSlave == nil { log.Fatalf("Could not regroup slaves of %+v; error: %+v", *instanceKey, err) } fmt.Println(fmt.Sprintf("%s lost: %d, trivial: %d, pseudo-gtid: %d", promotedSlave.Key.DisplayString(), len(lostSlaves), len(equalSlaves), len(aheadSlaves))) if err != nil { log.Fatale(err) } } case cliCommand("regroup-slaves-gtid"): { if instanceKey == nil { log.Fatal("Cannot deduce instance:", instance) } lostSlaves, movedSlaves, promotedSlave, err := inst.RegroupSlavesGTID(instanceKey, false, func(candidateSlave *inst.Instance) { fmt.Println(candidateSlave.Key.DisplayString()) }) if promotedSlave == nil { log.Fatalf("Could not regroup slaves of %+v; error: %+v", *instanceKey, err) } 
			fmt.Println(fmt.Sprintf("%s lost: %d, moved: %d", promotedSlave.Key.DisplayString(), len(lostSlaves), len(movedSlaves)))
			if err != nil {
				log.Fatale(err)
			}
		}
	case cliCommand("regroup-slaves-bls"):
		{
			if instanceKey == nil {
				log.Fatal("Cannot deduce instance:", instance)
			}
			promotedBinlogServer, err := inst.RegroupSlavesBinlogServers(instanceKey, false, nil)
			if promotedBinlogServer == nil {
				log.Fatalf("Could not regroup binlog server slaves of %+v; error: %+v", *instanceKey, err)
			}
			fmt.Println(promotedBinlogServer.Key.DisplayString())
			if err != nil {
				log.Fatale(err)
			}
		}
	// cluster
	case cliCommand("clusters"):
		{
			clusters, err := inst.ReadClusters()
			if err != nil {
				log.Fatale(err)
			} else {
				fmt.Println(strings.Join(clusters, "\n"))
			}
		}
	case cliCommand("find"):
		{
			if pattern == "" {
				log.Fatal("No pattern given")
			}
			instances, err := inst.FindInstances(pattern)
			if err != nil {
				log.Fatale(err)
			} else {
				for _, instance := range instances {
					fmt.Println(instance.Key.DisplayString())
				}
			}
		}
	case cliCommand("topology"):
		{
			if instanceKey == nil {
				instanceKey = thisInstanceKey
			}
			if instanceKey == nil {
				log.Fatal("Cannot deduce instance:", instance)
			}
			output, err := inst.ASCIITopology(instanceKey, pattern)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(output)
		}
	case cliCommand("which-instance"):
		{
			if instanceKey == nil {
				instanceKey = thisInstanceKey
			}
			if instanceKey == nil {
				log.Fatalf("Unable to get master: unresolved instance")
			}
			instance, _, err := inst.ReadInstance(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			if instance == nil {
				log.Fatalf("Instance not found: %+v", *instanceKey)
			}
			fmt.Println(instance.Key.DisplayString())
		}
	case cliCommand("which-master"):
		{
			if instanceKey == nil {
				instanceKey = thisInstanceKey
			}
			if instanceKey == nil {
				log.Fatalf("Unable to get master: unresolved instance")
			}
			instance, _, err := inst.ReadInstance(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			if instance == nil {
				log.Fatalf("Instance not found: %+v", *instanceKey)
			}
			fmt.Println(instance.MasterKey.DisplayString())
		}
	case cliCommand("which-cluster"):
		{
			clusterName := getClusterName(clusterAlias, instanceKey)
			fmt.Println(clusterName)
		}
	case cliCommand("which-cluster-instances"):
		{
			clusterName := getClusterName(clusterAlias, instanceKey)
			instances, err := inst.ReadClusterInstances(clusterName)
			if err != nil {
				log.Fatale(err)
			}
			for _, clusterInstance := range instances {
				fmt.Println(clusterInstance.Key.DisplayString())
			}
		}
	case cliCommand("which-cluster-osc-slaves"):
		{
			clusterName := getClusterName(clusterAlias, instanceKey)
			instances, err := inst.GetClusterOSCSlaves(clusterName)
			if err != nil {
				log.Fatale(err)
			}
			for _, clusterInstance := range instances {
				fmt.Println(clusterInstance.Key.DisplayString())
			}
		}
	case cliCommand("which-slaves"):
		{
			if instanceKey == nil {
				instanceKey = thisInstanceKey
			}
			if instanceKey == nil {
				log.Fatalf("Unable to get slaves: unresolved instance")
			}
			slaves, err := inst.ReadSlaveInstances(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			for _, slave := range slaves {
				fmt.Println(slave.Key.DisplayString())
			}
		}
	case cliCommand("instance-status"):
		{
			if instanceKey == nil {
				instanceKey = thisInstanceKey
			}
			if instanceKey == nil {
				log.Fatalf("Unable to get status: unresolved instance")
			}
			instance, _, err := inst.ReadInstance(instanceKey)
			if err != nil {
				log.Fatale(err)
			}
			if instance == nil {
				log.Fatalf("Instance not found: %+v", *instanceKey)
			}
			fmt.Println(instance.HumanReadableDescription())
		}
	case cliCommand("get-cluster-heuristic-lag"):
		{
			clusterName := getClusterName(clusterAlias, instanceKey)
			lag, err := inst.GetClusterHeuristicLag(clusterName)
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println(lag)
		}
	// meta
	case cliCommand("snapshot-topologies"):
		{
			err := inst.SnapshotTopologies()
			if err != nil {
				log.Fatale(err)
			}
		}
	case cliCommand("continuous"):
		{
			logic.ContinuousDiscovery()
		}
	case cliCommand("reset-hostname-resolve-cache"):
		{
			err := inst.ResetHostnameResolveCache()
			if err != nil {
				log.Fatale(err)
			}
			fmt.Println("hostname resolve cache cleared")
		}
	// Recovery & analysis
	case cliCommand("recover"), cliCommand("recover-lite"):
		{
			if instanceKey == nil {
				log.Fatal("Cannot deduce instance:", instance)
			}
			actionTaken, promotedInstance, err := logic.CheckAndRecover(instanceKey, destinationKey, true, (command == "recover-lite"))
			if err != nil {
				log.Fatale(err)
			}
			if actionTaken {
				fmt.Println(promotedInstance.Key.DisplayString())
			}
		}
	case cliCommand("replication-analysis"):
		{
			analysis, err := inst.GetReplicationAnalysis(false)
			if err != nil {
				log.Fatale(err)
			}
			for _, entry := range analysis {
				fmt.Println(fmt.Sprintf("%s (cluster %s): %s", entry.AnalyzedInstanceKey.DisplayString(), entry.ClusterDetails.ClusterName, entry.Analysis))
			}
		}
	// pool
	case cliCommand("submit-pool-instances"):
		{
			if pool == "" {
				log.Fatal("Please submit --pool")
			}
			err := inst.ApplyPoolInstances(pool, instance)
			if err != nil {
				log.Fatale(err)
			}
		}
	case cliCommand("cluster-pool-instances"):
		{
			clusterPoolInstances, err := inst.ReadAllClusterPoolInstances()
			if err != nil {
				log.Fatale(err)
			}
			for _, clusterPoolInstance := range clusterPoolInstances {
				fmt.Println(fmt.Sprintf("%s\t%s\t%s\t%s:%d", clusterPoolInstance.ClusterName, clusterPoolInstance.ClusterAlias, clusterPoolInstance.Pool, clusterPoolInstance.Hostname, clusterPoolInstance.Port))
			}
		}
	default:
		log.Fatalf("Unknown command: \"%s\". Available commands (-c):\n\t%v", command, strings.Join(knownCommands, "\n\t"))
	}
}
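
// Nearly every case above repeats the same preamble: fall back to thisInstanceKey
// when no instance was given explicitly, and abort when neither is available.
// The helper below is a hypothetical sketch (not part of orchestrator) that only
// illustrates that repeated pattern; its name and signature are assumptions.
func deduceInstanceKeyOrFatal(instanceKey *inst.InstanceKey, instance string) *inst.InstanceKey {
	// Prefer an explicitly given key; otherwise fall back to the instance this
	// process runs alongside, if one was resolved at startup.
	if instanceKey == nil {
		instanceKey = thisInstanceKey
	}
	if instanceKey == nil {
		log.Fatal("Cannot deduce instance:", instance)
	}
	return instanceKey
}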
// RecoverDeadCoMaster recovers a dead co-master, complete logic inside
func RecoverDeadCoMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) (promotedSlave *inst.Instance, lostSlaves [](*inst.Instance), err error) {
	analysisEntry := &topologyRecovery.AnalysisEntry
	failedInstanceKey := &analysisEntry.AnalyzedInstanceKey
	otherCoMasterKey := &analysisEntry.AnalyzedInstanceMasterKey
	otherCoMaster, found, _ := inst.ReadInstance(otherCoMasterKey)
	if otherCoMaster == nil || !found {
		return nil, lostSlaves, topologyRecovery.AddError(log.Errorf("RecoverDeadCoMaster: could not read info for co-master %+v of %+v", *otherCoMasterKey, *failedInstanceKey))
	}
	inst.AuditOperation("recover-dead-co-master", failedInstanceKey, "problem found; will recover")
	if !skipProcesses {
		if err := executeProcesses(config.Config.PreFailoverProcesses, "PreFailoverProcesses", topologyRecovery, true); err != nil {
			return nil, lostSlaves, topologyRecovery.AddError(err)
		}
	}

	log.Debugf("topology_recovery: RecoverDeadCoMaster: will recover %+v", *failedInstanceKey)

	var coMasterRecoveryType MasterRecoveryType = MasterRecoveryPseudoGTID
	if analysisEntry.OracleGTIDImmediateTopology || analysisEntry.MariaDBGTIDImmediateTopology {
		coMasterRecoveryType = MasterRecoveryGTID
	}
	log.Debugf("topology_recovery: RecoverDeadCoMaster: coMasterRecoveryType=%+v", coMasterRecoveryType)

	switch coMasterRecoveryType {
	case MasterRecoveryGTID:
		{
			lostSlaves, _, promotedSlave, err = inst.RegroupSlavesGTID(failedInstanceKey, true, nil)
		}
	case MasterRecoveryPseudoGTID:
		{
			lostSlaves, _, _, promotedSlave, err = inst.RegroupSlavesPseudoGTIDIncludingSubSlavesOfBinlogServers(failedInstanceKey, true, nil, &topologyRecovery.PostponedFunctionsContainer)
		}
	}
	topologyRecovery.AddError(err)

	mustPromoteOtherCoMaster := config.Config.CoMasterRecoveryMustPromoteOtherCoMaster
	if !otherCoMaster.ReadOnly {
		log.Debugf("topology_recovery: RecoverDeadCoMaster: other co-master %+v is writeable hence has to be promoted", otherCoMaster.Key)
		mustPromoteOtherCoMaster = true
	}
	log.Debugf("topology_recovery: RecoverDeadCoMaster: mustPromoteOtherCoMaster? %+v", mustPromoteOtherCoMaster)

	if promotedSlave != nil {
		topologyRecovery.ParticipatingInstanceKeys.AddKey(promotedSlave.Key)
		if mustPromoteOtherCoMaster {
			log.Debugf("topology_recovery: mustPromoteOtherCoMaster. Verifying that %+v is/can be promoted", *otherCoMasterKey)
			promotedSlave, err = replacePromotedSlaveWithCandidate(failedInstanceKey, promotedSlave, otherCoMasterKey)
		} else {
			// We are allowed to promote any server
			promotedSlave, err = replacePromotedSlaveWithCandidate(failedInstanceKey, promotedSlave, nil)

			if promotedSlave.DataCenter == otherCoMaster.DataCenter &&
				promotedSlave.PhysicalEnvironment == otherCoMaster.PhysicalEnvironment && false {
				// and _still_ we prefer to promote the co-master! They're in same env & DC so no worries about geo issues!
				promotedSlave, err = replacePromotedSlaveWithCandidate(failedInstanceKey, promotedSlave, otherCoMasterKey)
			}
		}
		topologyRecovery.AddError(err)
	}

	if promotedSlave != nil {
		if mustPromoteOtherCoMaster && !promotedSlave.Key.Equals(otherCoMasterKey) {
			topologyRecovery.AddError(log.Errorf("RecoverDeadCoMaster: could not manage to promote other-co-master %+v; was only able to promote %+v; CoMasterRecoveryMustPromoteOtherCoMaster is true, therefore failing", *otherCoMasterKey, promotedSlave.Key))
			promotedSlave = nil
		}
	}
	if promotedSlave != nil {
		topologyRecovery.ParticipatingInstanceKeys.AddKey(promotedSlave.Key)
	}

	// OK, we may have someone promoted. Either this was the other co-master or another slave.
	// Noting down that we DO NOT attempt to set a new co-master topology. We are good with remaining with a single master.
	// I tried solving the "let's promote a slave and create a new co-master setup" but this turns out to be so complex due to various factors.
	// I see this as risky and not worth the questionable benefit.
	// Maybe future me is a smarter person and finds a simple solution. Unlikely. I'm getting dumber.
	//
	// ...
	// Now that we're convinced, take a look at what we can be left with:
	// Say we started with M1<->M2<-S1, with M2 failing, and we promoted S1.
	// We now have M1->S1 (because S1 is promoted), S1->M2 (because that's what it remembers), M2->M1 (because that's what it remembers)
	// !! This is an evil 3-node circle that must be broken.
	// config.Config.ApplyMySQLPromotionAfterMasterFailover, if true, will cause it to break, because we would RESET SLAVE on S1
	// but we want to make sure the circle is broken no matter what.
	// So in the case we promoted not-the-other-co-master, we issue a detach-slave-master-host, which is a reversible operation
	if promotedSlave != nil && !promotedSlave.Key.Equals(otherCoMasterKey) {
		_, err = inst.DetachSlaveMasterHost(&promotedSlave.Key)
		topologyRecovery.AddError(log.Errore(err))
	}

	if promotedSlave != nil && len(lostSlaves) > 0 && config.Config.DetachLostSlavesAfterMasterFailover {
		postponedFunction := func() error {
			log.Debugf("topology_recovery: - RecoverDeadCoMaster: lost %+v slaves during recovery process; detaching them", len(lostSlaves))
			for _, slave := range lostSlaves {
				slave := slave
				inst.DetachSlaveOperation(&slave.Key)
			}
			return nil
		}
		topologyRecovery.AddPostponedFunction(postponedFunction)
	}
	if config.Config.MasterFailoverLostInstancesDowntimeMinutes > 0 {
		postponedFunction := func() error {
			inst.BeginDowntime(failedInstanceKey, inst.GetMaintenanceOwner(), "RecoverDeadCoMaster indicates this instance is lost", config.Config.MasterFailoverLostInstancesDowntimeMinutes*60)
			for _, slave := range lostSlaves {
				slave := slave
				inst.BeginDowntime(&slave.Key, inst.GetMaintenanceOwner(), "RecoverDeadCoMaster indicates this instance is lost", config.Config.MasterFailoverLostInstancesDowntimeMinutes*60)
			}
			return nil
		}
		topologyRecovery.AddPostponedFunction(postponedFunction)
	}

	return promotedSlave, lostSlaves, err
}
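
// RecoverDeadCoMaster defers non-urgent cleanup (detaching lost slaves, beginning
// downtime) through topologyRecovery.AddPostponedFunction(), so the promotion path
// itself is not delayed by bookkeeping. The type below is a hypothetical, simplified
// stand-in for such a container, shown only to illustrate the pattern; it is not
// orchestrator's PostponedFunctionsContainer and is not safe for concurrent use.
type examplePostponedFunctionsContainer struct {
	functions []func() error
}

// AddPostponedFunction queues work to be executed after the critical recovery steps.
func (c *examplePostponedFunctionsContainer) AddPostponedFunction(f func() error) {
	c.functions = append(c.functions, f)
}

// InvokePostponed runs the queued functions and logs, rather than propagates, their errors.
func (c *examplePostponedFunctionsContainer) InvokePostponed() {
	for _, f := range c.functions {
		if err := f(); err != nil {
			log.Errore(err)
		}
	}
}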
// RecoverDeadMaster recovers a dead master, complete logic inside
func RecoverDeadMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) (promotedSlave *inst.Instance, lostSlaves [](*inst.Instance), err error) {
	analysisEntry := &topologyRecovery.AnalysisEntry
	failedInstanceKey := &analysisEntry.AnalyzedInstanceKey

	inst.AuditOperation("recover-dead-master", failedInstanceKey, "problem found; will recover")
	if !skipProcesses {
		if err := executeProcesses(config.Config.PreFailoverProcesses, "PreFailoverProcesses", topologyRecovery, true); err != nil {
			return nil, lostSlaves, topologyRecovery.AddError(err)
		}
	}

	log.Debugf("topology_recovery: RecoverDeadMaster: will recover %+v", *failedInstanceKey)

	var masterRecoveryType MasterRecoveryType = MasterRecoveryPseudoGTID
	if analysisEntry.OracleGTIDImmediateTopology || analysisEntry.MariaDBGTIDImmediateTopology {
		masterRecoveryType = MasterRecoveryGTID
	} else if analysisEntry.BinlogServerImmediateTopology {
		masterRecoveryType = MasterRecoveryBinlogServer
	}
	log.Debugf("topology_recovery: RecoverDeadMaster: masterRecoveryType=%+v", masterRecoveryType)

	switch masterRecoveryType {
	case MasterRecoveryGTID:
		{
			lostSlaves, _, promotedSlave, err = inst.RegroupSlavesGTID(failedInstanceKey, true, nil)
		}
	case MasterRecoveryPseudoGTID:
		{
			lostSlaves, _, _, promotedSlave, err = inst.RegroupSlavesPseudoGTIDIncludingSubSlavesOfBinlogServers(failedInstanceKey, true, nil, &topologyRecovery.PostponedFunctionsContainer)
		}
	case MasterRecoveryBinlogServer:
		{
			promotedSlave, err = recoverDeadMasterInBinlogServerTopology(topologyRecovery)
		}
	}
	topologyRecovery.AddError(err)

	if promotedSlave != nil && len(lostSlaves) > 0 && config.Config.DetachLostSlavesAfterMasterFailover {
		postponedFunction := func() error {
			log.Debugf("topology_recovery: - RecoverDeadMaster: lost %+v slaves during recovery process; detaching them", len(lostSlaves))
			for _, slave := range lostSlaves {
				slave := slave
				inst.DetachSlaveOperation(&slave.Key)
			}
			return nil
		}
		topologyRecovery.AddPostponedFunction(postponedFunction)
	}
	if config.Config.MasterFailoverLostInstancesDowntimeMinutes > 0 {
		postponedFunction := func() error {
			inst.BeginDowntime(failedInstanceKey, inst.GetMaintenanceOwner(), "RecoverDeadMaster indicates this instance is lost", config.Config.MasterFailoverLostInstancesDowntimeMinutes*60)
			for _, slave := range lostSlaves {
				slave := slave
				inst.BeginDowntime(&slave.Key, inst.GetMaintenanceOwner(), "RecoverDeadMaster indicates this instance is lost", config.Config.MasterFailoverLostInstancesDowntimeMinutes*60)
			}
			return nil
		}
		topologyRecovery.AddPostponedFunction(postponedFunction)
	}

	if promotedSlave == nil {
		inst.AuditOperation("recover-dead-master", failedInstanceKey, "Failure: no slave promoted.")
	} else {
		inst.AuditOperation("recover-dead-master", failedInstanceKey, fmt.Sprintf("promoted slave: %+v", promotedSlave.Key))
	}
	return promotedSlave, lostSlaves, err
}
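
// The masterRecoveryType decision in RecoverDeadMaster above is a simple precedence
// rule: prefer GTID when the immediate topology speaks Oracle GTID or MariaDB GTID,
// fall back to binlog servers, and default to Pseudo-GTID. The function below is a
// hypothetical, side-effect-free restatement of that rule, included only for
// illustration; it is not part of orchestrator.
func chooseMasterRecoveryType(analysisEntry *inst.ReplicationAnalysis) MasterRecoveryType {
	if analysisEntry.OracleGTIDImmediateTopology || analysisEntry.MariaDBGTIDImmediateTopology {
		return MasterRecoveryGTID
	}
	if analysisEntry.BinlogServerImmediateTopology {
		return MasterRecoveryBinlogServer
	}
	return MasterRecoveryPseudoGTID
}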
// RecoverDeadCoMaster recovers a dead co-master, complete logic inside
func RecoverDeadCoMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) (otherCoMaster *inst.Instance, lostSlaves [](*inst.Instance), err error) {
	analysisEntry := &topologyRecovery.AnalysisEntry
	failedInstanceKey := &analysisEntry.AnalyzedInstanceKey
	otherCoMasterKey := &analysisEntry.AnalyzedInstanceMasterKey

	inst.AuditOperation("recover-dead-co-master", failedInstanceKey, "problem found; will recover")
	if !skipProcesses {
		if err := executeProcesses(config.Config.PreFailoverProcesses, "PreFailoverProcesses", topologyRecovery, true); err != nil {
			return nil, lostSlaves, topologyRecovery.AddError(err)
		}
	}

	log.Debugf("topology_recovery: RecoverDeadCoMaster: will recover %+v", *failedInstanceKey)

	var coMasterRecoveryType MasterRecoveryType = MasterRecoveryPseudoGTID
	if (analysisEntry.OracleGTIDImmediateTopology || analysisEntry.MariaDBGTIDImmediateTopology) && !analysisEntry.PseudoGTIDImmediateTopology {
		coMasterRecoveryType = MasterRecoveryGTID
	}
	log.Debugf("topology_recovery: RecoverDeadCoMaster: coMasterRecoveryType=%+v", coMasterRecoveryType)

	var promotedSlave *inst.Instance
	switch coMasterRecoveryType {
	case MasterRecoveryGTID:
		{
			lostSlaves, _, promotedSlave, err = inst.RegroupSlavesGTID(failedInstanceKey, true, nil)
		}
	case MasterRecoveryPseudoGTID:
		{
			lostSlaves, _, _, promotedSlave, err = inst.RegroupSlavesPseudoGTIDIncludingSubSlavesOfBinlogServers(failedInstanceKey, true, nil, &topologyRecovery.PostponedFunctionsContainer)
		}
	}
	topologyRecovery.AddError(err)

	if promotedSlave != nil {
		topologyRecovery.ParticipatingInstanceKeys.AddKey(promotedSlave.Key)
		promotedSlave, err = replacePromotedSlaveWithCandidate(failedInstanceKey, promotedSlave, otherCoMasterKey)
		topologyRecovery.AddError(err)
	}
	if promotedSlave != nil {
		if promotedSlave.Key.Equals(otherCoMasterKey) {
			topologyRecovery.ParticipatingInstanceKeys.AddKey(*otherCoMasterKey)
			otherCoMaster = promotedSlave
		} else {
			err = log.Errorf("RecoverDeadCoMaster: could not manage to promote other-co-master %+v; was only able to promote %+v", *otherCoMasterKey, promotedSlave.Key)
			promotedSlave = nil
		}
	}

	if promotedSlave != nil && len(lostSlaves) > 0 && config.Config.DetachLostSlavesAfterMasterFailover {
		postponedFunction := func() error {
			log.Debugf("topology_recovery: - RecoverDeadCoMaster: lost %+v slaves during recovery process; detaching them", len(lostSlaves))
			for _, slave := range lostSlaves {
				slave := slave
				inst.DetachSlaveOperation(&slave.Key)
			}
			return nil
		}
		topologyRecovery.AddPostponedFunction(postponedFunction)
	}
	if config.Config.MasterFailoverLostInstancesDowntimeMinutes > 0 {
		postponedFunction := func() error {
			inst.BeginDowntime(failedInstanceKey, inst.GetMaintenanceOwner(), "RecoverDeadCoMaster indicates this instance is lost", config.Config.MasterFailoverLostInstancesDowntimeMinutes*60)
			for _, slave := range lostSlaves {
				slave := slave
				inst.BeginDowntime(&slave.Key, inst.GetMaintenanceOwner(), "RecoverDeadCoMaster indicates this instance is lost", config.Config.MasterFailoverLostInstancesDowntimeMinutes*60)
			}
			return nil
		}
		topologyRecovery.AddPostponedFunction(postponedFunction)
	}

	return otherCoMaster, lostSlaves, err
}
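
// The recovery functions above are shaped by a handful of configuration fields:
// PreFailoverProcesses gates external hooks before any action is taken,
// DetachLostSlavesAfterMasterFailover and MasterFailoverLostInstancesDowntimeMinutes
// control cleanup of slaves lost during the regroup, and
// CoMasterRecoveryMustPromoteOtherCoMaster restricts which server may be promoted in
// co-master recovery. The helper below is a hypothetical example only: the field
// names come from the code above, but the values (and the assumption that
// PreFailoverProcesses is a list of shell commands) are illustrative, not defaults.
func applyExampleRecoveryConfig() {
	config.Config.PreFailoverProcesses = []string{"/path/to/pre-failover-hook.sh"} // hooks run before any recovery action
	config.Config.DetachLostSlavesAfterMasterFailover = true                       // detach slaves that could not be repositioned under the promoted server
	config.Config.MasterFailoverLostInstancesDowntimeMinutes = 60                  // downtime lost instances for an hour
	config.Config.CoMasterRecoveryMustPromoteOtherCoMaster = true                  // insist on promoting the surviving co-master
}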