// instance_topology.go: Gitee mirror of https://github.com/outbrain/orchestrator/ (synced once daily)
/*
Copyright 2014 Outbrain Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package inst
import (
"fmt"
"github.com/outbrain/golib/log"
"github.com/outbrain/golib/math"
"github.com/outbrain/orchestrator/go/config"
"regexp"
"sort"
"strings"
"time"
)
// getASCIITopologyEntry will get an ASCII topology tree rooted at the given instance. It recursively
// draws the tree.
func getASCIITopologyEntry(depth int, instance *Instance, replicationMap map[*Instance]([]*Instance), extendedOutput bool) []string {
if instance == nil {
return []string{}
}
if instance.IsCoMaster && depth > 1 {
return []string{}
}
prefix := ""
if depth > 0 {
prefix = strings.Repeat(" ", (depth-1)*2)
if instance.SlaveRunning() {
prefix += "+ "
} else {
prefix += "- "
}
}
entry := fmt.Sprintf("%s%s", prefix, instance.Key.DisplayString())
if extendedOutput {
entry = fmt.Sprintf("%s %s", entry, instance.HumanReadableDescription())
}
result := []string{entry}
for _, slave := range replicationMap[instance] {
slavesResult := getASCIITopologyEntry(depth+1, slave, replicationMap, extendedOutput)
result = append(result, slavesResult...)
}
return result
}
// ASCIITopology returns a string representation of the topology of the given instance.
func ASCIITopology(instanceKey *InstanceKey, historyTimestampPattern string) (string, error) {
instance, found, err := ReadInstance(instanceKey)
if err != nil || !found {
return "", err
}
var instances [](*Instance)
if historyTimestampPattern == "" {
instances, err = ReadClusterInstances(instance.ClusterName)
} else {
instances, err = ReadHistoryClusterInstances(instance.ClusterName, historyTimestampPattern)
}
if err != nil {
return "", err
}
instancesMap := make(map[InstanceKey](*Instance))
for _, instance := range instances {
log.Debugf("instanceKey: %+v", instance.Key)
instancesMap[instance.Key] = instance
}
replicationMap := make(map[*Instance]([]*Instance))
var masterInstance *Instance
// Investigate slaves:
for _, instance := range instances {
master, ok := instancesMap[instance.MasterKey]
if ok {
if _, ok := replicationMap[master]; !ok {
replicationMap[master] = [](*Instance){}
}
replicationMap[master] = append(replicationMap[master], instance)
} else {
masterInstance = instance
}
}
// Get entries:
var entries []string
if masterInstance != nil {
// Single master
entries = getASCIITopologyEntry(0, masterInstance, replicationMap, historyTimestampPattern == "")
} else {
// Co-masters? For visualization we put each in its own branch while ignoring its other co-masters.
for _, instance := range instances {
if instance.IsCoMaster {
entries = append(entries, getASCIITopologyEntry(1, instance, replicationMap, historyTimestampPattern == "")...)
}
}
}
// Beautify: make sure the "[...]" part is nicely aligned for all instances.
{
maxIndent := 0
for _, entry := range entries {
maxIndent = math.MaxInt(maxIndent, strings.Index(entry, "["))
}
for i, entry := range entries {
entryIndent := strings.Index(entry, "[")
if maxIndent > entryIndent {
tokens := strings.Split(entry, "[")
newEntry := fmt.Sprintf("%s%s[%s", tokens[0], strings.Repeat(" ", maxIndent-entryIndent), tokens[1])
entries[i] = newEntry
}
}
}
// Turn into string
result := strings.Join(entries, "\n")
return result, nil
}
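// A hypothetical usage sketch (not part of the original source): printing the current topology
// of the cluster that some known instance belongs to. The host:port key is an assumption for
// illustration only.
//
//	key := &InstanceKey{Hostname: "db-master.example.com", Port: 3306}
//	if topology, err := ASCIITopology(key, ""); err == nil {
//		fmt.Println(topology)
//	}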
// GetInstanceMaster synchronously reaches into the replication topology
// and retrieves master's data
func GetInstanceMaster(instance *Instance) (*Instance, error) {
master, err := ReadTopologyInstance(&instance.MasterKey)
return master, err
}
// InstancesAreSiblings checks whether both instances are replicating from same master
func InstancesAreSiblings(instance0, instance1 *Instance) bool {
if !instance0.IsSlave() {
return false
}
if !instance1.IsSlave() {
return false
}
if instance0.Key.Equals(&instance1.Key) {
// same instance...
return false
}
return instance0.MasterKey.Equals(&instance1.MasterKey)
}
// InstanceIsMasterOf checks whether an instance is the master of another
func InstanceIsMasterOf(allegedMaster, allegedSlave *Instance) bool {
if !allegedSlave.IsSlave() {
return false
}
if allegedMaster.Key.Equals(&allegedSlave.Key) {
// same instance...
return false
}
return allegedMaster.Key.Equals(&allegedSlave.MasterKey)
}
// MoveEquivalent will attempt moving instance indicated by instanceKey below another instance,
// based on known master coordinates equivalence
func MoveEquivalent(instanceKey, otherKey *InstanceKey) (*Instance, error) {
instance, found, err := ReadInstance(instanceKey)
if err != nil || !found {
return instance, err
}
if instance.Key.Equals(otherKey) {
return instance, fmt.Errorf("MoveEquivalent: attempt to move an instance below itself %+v", instance.Key)
}
// Are there equivalent coordinates to this instance?
instanceCoordinates := &InstanceBinlogCoordinates{Key: instance.MasterKey, Coordinates: instance.ExecBinlogCoordinates}
binlogCoordinates, err := GetEquivalentBinlogCoordinatesFor(instanceCoordinates, otherKey)
if err != nil {
return instance, err
}
if binlogCoordinates == nil {
return instance, fmt.Errorf("No equivalent coordinates found for %+v replicating from %+v at %+v", instance.Key, instance.MasterKey, instance.ExecBinlogCoordinates)
}
// For performance reasons, we did all the above before even checking whether the slave is stopped, or stopping it at all.
// This allows us to quickly skip the entire operation should there NOT be equivalent coordinates.
// To elaborate: if the slave is actually running AND making progress, it is unlikely/impossible for it to have
// equivalent coordinates, as the current coordinates are likely to have never been seen before.
// This excludes the case, for example, where the master is itself not replicating.
// Now, if we DO happen upon equivalent coordinates, we need to double check. For CHANGE MASTER to happen we must
// stop the slave anyhow. But then let's verify the position hasn't changed.
knownExecBinlogCoordinates := instance.ExecBinlogCoordinates
instance, err = StopSlave(instanceKey)
if err != nil {
goto Cleanup
}
if !instance.ExecBinlogCoordinates.Equals(&knownExecBinlogCoordinates) {
// Seems like things were still running... We don't have an equivalence point
err = fmt.Errorf("MoveEquivalent(): ExecBinlogCoordinates changed after stopping replication on %+v; aborting", instance.Key)
goto Cleanup
}
instance, err = ChangeMasterTo(instanceKey, otherKey, binlogCoordinates, false, GTIDHintNeutral)
Cleanup:
instance, _ = StartSlave(instanceKey)
if err == nil {
message := fmt.Sprintf("moved %+v via equivalence coordinates below %+v", *instanceKey, *otherKey)
log.Debugf(message)
AuditOperation("move-equivalent", instanceKey, message)
}
return instance, err
}
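// A hypothetical usage sketch (not part of the original source): attempting a cheap move of a
// replica below another server using previously recorded master-coordinate equivalence. The keys
// are assumptions for illustration only.
//
//	instanceKey := &InstanceKey{Hostname: "replica1.example.com", Port: 3306}
//	otherKey := &InstanceKey{Hostname: "replica2.example.com", Port: 3306}
//	if _, err := MoveEquivalent(instanceKey, otherKey); err != nil {
//		log.Errore(err)
//	}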
// MoveUp will attempt moving instance indicated by instanceKey up the topology hierarchy.
// It will perform all safety and sanity checks and will tamper with this instance's replication
// as well as its master.
func MoveUp(instanceKey *InstanceKey) (*Instance, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, err
}
if !instance.IsSlave() {
return instance, fmt.Errorf("instance is not a slave: %+v", instanceKey)
}
rinstance, _, _ := ReadInstance(&instance.Key)
if canMove, merr := rinstance.CanMove(); !canMove {
return instance, merr
}
master, err := GetInstanceMaster(instance)
if err != nil {
return instance, log.Errorf("Cannot GetInstanceMaster() for %+v. error=%+v", instance.Key, err)
}
if !master.IsSlave() {
return instance, fmt.Errorf("master is not a slave itself: %+v", master.Key)
}
if canReplicate, err := instance.CanReplicateFrom(master); canReplicate == false {
return instance, err
}
if master.IsBinlogServer() {
// Quick solution via binlog servers
return Repoint(instanceKey, &master.MasterKey, GTIDHintDeny)
}
log.Infof("Will move %+v up the topology", *instanceKey)
if maintenanceToken, merr := BeginMaintenance(instanceKey, GetMaintenanceOwner(), "move up"); merr != nil {
err = fmt.Errorf("Cannot begin maintenance on %+v", *instanceKey)
goto Cleanup
} else {
defer EndMaintenance(maintenanceToken)
}
if maintenanceToken, merr := BeginMaintenance(&master.Key, GetMaintenanceOwner(), fmt.Sprintf("child %+v moves up", *instanceKey)); merr != nil {
err = fmt.Errorf("Cannot begin maintenance on %+v", master.Key)
goto Cleanup
} else {
defer EndMaintenance(maintenanceToken)
}
if !instance.UsingMariaDBGTID {
master, err = StopSlave(&master.Key)
if err != nil {
goto Cleanup
}
}
instance, err = StopSlave(instanceKey)
if err != nil {
goto Cleanup
}
if !instance.UsingMariaDBGTID {
instance, err = StartSlaveUntilMasterCoordinates(instanceKey, &master.SelfBinlogCoordinates)
if err != nil {
goto Cleanup
}
}
// We can skip hostname unresolve; we just copy+paste whatever our master thinks of its master.
instance, err = ChangeMasterTo(instanceKey, &master.MasterKey, &master.ExecBinlogCoordinates, true, GTIDHintDeny)
if err != nil {
goto Cleanup
}
Cleanup:
instance, _ = StartSlave(instanceKey)
if !instance.UsingMariaDBGTID {
master, _ = StartSlave(&master.Key)
}
if err != nil {
return instance, log.Errore(err)
}
// and we're done (pending deferred functions)
AuditOperation("move-up", instanceKey, fmt.Sprintf("moved up %+v. Previous master: %+v", *instanceKey, master.Key))
return instance, err
}
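// A hypothetical usage sketch (not part of the original source): moving a replica one level up,
// so that it replicates directly from its grandparent. The key is an assumption for illustration.
//
//	key := &InstanceKey{Hostname: "replica1.example.com", Port: 3306}
//	if movedInstance, err := MoveUp(key); err == nil {
//		log.Infof("now replicating from %+v", movedInstance.MasterKey)
//	}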
// MoveUpSlaves will attempt moving up all slaves of a given instance at the same time.
// In wall-clock time this is faster than moving them one at a time. However, it means all slaves of the given instance,
// and the instance itself, will stop replicating together.
func MoveUpSlaves(instanceKey *InstanceKey, pattern string) ([](*Instance), *Instance, error, []error) {
res := [](*Instance){}
errs := []error{}
slaveMutex := make(chan bool, 1)
var barrier chan *InstanceKey
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return res, nil, err, errs
}
if !instance.IsSlave() {
return res, instance, fmt.Errorf("instance is not a slave: %+v", instanceKey), errs
}
_, err = GetInstanceMaster(instance)
if err != nil {
return res, instance, log.Errorf("Cannot GetInstanceMaster() for %+v. error=%+v", instance.Key, err), errs
}
if instance.IsBinlogServer() {
slaves, err, errors := RepointSlavesTo(instanceKey, pattern, &instance.MasterKey)
// Bail out!
return slaves, instance, err, errors
}
slaves, err := ReadSlaveInstances(instanceKey)
if err != nil {
return res, instance, err, errs
}
slaves = filterInstancesByPattern(slaves, pattern)
if len(slaves) == 0 {
return res, instance, nil, errs
}
log.Infof("Will move slaves of %+v up the topology", *instanceKey)
if maintenanceToken, merr := BeginMaintenance(instanceKey, GetMaintenanceOwner(), "move up slaves"); merr != nil {
err = fmt.Errorf("Cannot begin maintenance on %+v", *instanceKey)
goto Cleanup
} else {
defer EndMaintenance(maintenanceToken)
}
for _, slave := range slaves {
if maintenanceToken, merr := BeginMaintenance(&slave.Key, GetMaintenanceOwner(), fmt.Sprintf("%+v moves up", slave.Key)); merr != nil {
err = fmt.Errorf("Cannot begin maintenance on %+v", slave.Key)
goto Cleanup
} else {
defer EndMaintenance(maintenanceToken)
}
}
instance, err = StopSlave(instanceKey)
if err != nil {
goto Cleanup
}
barrier = make(chan *InstanceKey)
for _, slave := range slaves {
slave := slave
go func() {
defer func() {
defer func() { barrier <- &slave.Key }()
StartSlave(&slave.Key)
}()
var slaveErr error
ExecuteOnTopology(func() {
if canReplicate, err := slave.CanReplicateFrom(instance); canReplicate == false || err != nil {
slaveErr = err
return
}
if instance.IsBinlogServer() {
// Special case. Just repoint
slave, err = Repoint(&slave.Key, instanceKey, GTIDHintDeny)
if err != nil {
slaveErr = err
return
}
} else {
// Normal case. Do the math.
slave, err = StopSlave(&slave.Key)
if err != nil {
slaveErr = err
return
}
slave, err = StartSlaveUntilMasterCoordinates(&slave.Key, &instance.SelfBinlogCoordinates)
if err != nil {
slaveErr = err
return
}
slave, err = ChangeMasterTo(&slave.Key, &instance.MasterKey, &instance.ExecBinlogCoordinates, false, GTIDHintDeny)
if err != nil {
slaveErr = err
return
}
}
})
func() {
slaveMutex <- true
defer func() { <-slaveMutex }()
if slaveErr == nil {
res = append(res, slave)
} else {
errs = append(errs, slaveErr)
}
}()
}()
}
for range slaves {
<-barrier
}
Cleanup:
instance, _ = StartSlave(instanceKey)
if err != nil {
return res, instance, log.Errore(err), errs
}
if len(errs) == len(slaves) {
// All returned with error
return res, instance, log.Error("Error on all operations"), errs
}
AuditOperation("move-up-slaves", instanceKey, fmt.Sprintf("moved up %d/%d slaves of %+v. New master: %+v", len(res), len(slaves), *instanceKey, instance.MasterKey))
return res, instance, err, errs
}
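// A hypothetical usage sketch (not part of the original source): moving all slaves of an
// intermediate master one level up in a single operation. The key and the empty pattern are
// assumptions for illustration only.
//
//	key := &InstanceKey{Hostname: "intermediate.example.com", Port: 3306}
//	movedSlaves, _, err, errs := MoveUpSlaves(key, "")
//	log.Infof("moved %d slaves; err=%+v, individual errors=%d", len(movedSlaves), err, len(errs))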
// MoveBelow will attempt moving instance indicated by instanceKey below its supposed sibling indicated by siblingKey.
// It will perform all safety and sanity checks and will tamper with this instance's replication
// as well as its sibling.
func MoveBelow(instanceKey, siblingKey *InstanceKey) (*Instance, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, err
}
sibling, err := ReadTopologyInstance(siblingKey)
if err != nil {
return instance, err
}
if sibling.IsBinlogServer() {
// Binlog server has same coordinates as master
// Easy solution!
return Repoint(instanceKey, &sibling.Key, GTIDHintDeny)
}
rinstance, _, _ := ReadInstance(&instance.Key)
if canMove, merr := rinstance.CanMove(); !canMove {
return instance, merr
}
rinstance, _, _ = ReadInstance(&sibling.Key)
if canMove, merr := rinstance.CanMove(); !canMove {
return instance, merr
}
if !InstancesAreSiblings(instance, sibling) {
return instance, fmt.Errorf("instances are not siblings: %+v, %+v", *instanceKey, *siblingKey)
}
if canReplicate, err := instance.CanReplicateFrom(sibling); !canReplicate {
return instance, err
}
log.Infof("Will move %+v below %+v", instanceKey, siblingKey)
if maintenanceToken, merr := BeginMaintenance(instanceKey, GetMaintenanceOwner(), fmt.Sprintf("move below %+v", *siblingKey)); merr != nil {
err = fmt.Errorf("Cannot begin maintenance on %+v", *instanceKey)
goto Cleanup
} else {
defer EndMaintenance(maintenanceToken)
}
if maintenanceToken, merr := BeginMaintenance(siblingKey, GetMaintenanceOwner(), fmt.Sprintf("%+v moves below this", *instanceKey)); merr != nil {
err = fmt.Errorf("Cannot begin maintenance on %+v", *siblingKey)
goto Cleanup
} else {
defer EndMaintenance(maintenanceToken)
}
instance, err = StopSlave(instanceKey)
if err != nil {
goto Cleanup
}
sibling, err = StopSlave(siblingKey)
if err != nil {
goto Cleanup
}
if instance.ExecBinlogCoordinates.SmallerThan(&sibling.ExecBinlogCoordinates) {
instance, err = StartSlaveUntilMasterCoordinates(instanceKey, &sibling.ExecBinlogCoordinates)
if err != nil {
goto Cleanup
}
} else if sibling.ExecBinlogCoordinates.SmallerThan(&instance.ExecBinlogCoordinates) {
sibling, err = StartSlaveUntilMasterCoordinates(siblingKey, &instance.ExecBinlogCoordinates)
if err != nil {
goto Cleanup
}
}
// At this point both siblings have executed exact same statements and are identical
instance, err = ChangeMasterTo(instanceKey, &sibling.Key, &sibling.SelfBinlogCoordinates, false, GTIDHintDeny)
if err != nil {
goto Cleanup
}
Cleanup:
instance, _ = StartSlave(instanceKey)
sibling, _ = StartSlave(siblingKey)
if err != nil {
return instance, log.Errore(err)
}
// and we're done (pending deferred functions)
AuditOperation("move-below", instanceKey, fmt.Sprintf("moved %+v below %+v", *instanceKey, *siblingKey))
return instance, err
}
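// A hypothetical usage sketch (not part of the original source): repositioning a replica below
// one of its siblings using classic binlog coordinates. The keys are assumptions for illustration.
//
//	instanceKey := &InstanceKey{Hostname: "replica1.example.com", Port: 3306}
//	siblingKey := &InstanceKey{Hostname: "replica2.example.com", Port: 3306}
//	if _, err := MoveBelow(instanceKey, siblingKey); err != nil {
//		log.Errore(err)
//	}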
func canMoveViaGTID(instance, otherInstance *Instance) (isOracleGTID bool, isMariaDBGTID, canMove bool) {
isOracleGTID = (instance.UsingOracleGTID && otherInstance.SupportsOracleGTID)
isMariaDBGTID = (instance.UsingMariaDBGTID && otherInstance.IsMariaDB())
return isOracleGTID, isMariaDBGTID, isOracleGTID || isMariaDBGTID
}
// moveInstanceBelowViaGTID will attempt moving given instance below another instance using either Oracle GTID or MariaDB GTID.
func moveInstanceBelowViaGTID(instance, otherInstance *Instance) (*Instance, error) {
_, _, canMove := canMoveViaGTID(instance, otherInstance)
instanceKey := &instance.Key
otherInstanceKey := &otherInstance.Key
if !canMove {
return instance, fmt.Errorf("Cannot move via GTID as not both instances use GTID: %+v, %+v", *instanceKey, *otherInstanceKey)
}
var err error
rinstance, _, _ := ReadInstance(&instance.Key)
if canMove, merr := rinstance.CanMoveViaMatch(); !canMove {
return instance, merr
}
if canReplicate, err := instance.CanReplicateFrom(otherInstance); !canReplicate {
return instance, err
}
log.Infof("Will move %+v below %+v via GTID", instanceKey, otherInstanceKey)
if maintenanceToken, merr := BeginMaintenance(instanceKey, GetMaintenanceOwner(), fmt.Sprintf("move below %+v", *otherInstanceKey)); merr != nil {
err = fmt.Errorf("Cannot begin maintenance on %+v", *instanceKey)
goto Cleanup
} else {
defer EndMaintenance(maintenanceToken)
}
instance, err = StopSlave(instanceKey)
if err != nil {
goto Cleanup
}
instance, err = ChangeMasterTo(instanceKey, &otherInstance.Key, &otherInstance.SelfBinlogCoordinates, false, GTIDHintForce)
if err != nil {
goto Cleanup
}
Cleanup:
instance, _ = StartSlave(instanceKey)
if err != nil {
return instance, log.Errore(err)
}
// and we're done (pending deferred functions)
AuditOperation("move-below-gtid", instanceKey, fmt.Sprintf("moved %+v below %+v", *instanceKey, *otherInstanceKey))
return instance, err
}
// MoveBelowGTID will attempt moving instance indicated by instanceKey below another instance using either Oracle GTID or MariaDB GTID.
func MoveBelowGTID(instanceKey, otherKey *InstanceKey) (*Instance, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, err
}
other, err := ReadTopologyInstance(otherKey)
if err != nil {
return instance, err
}
return moveInstanceBelowViaGTID(instance, other)
}
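// A hypothetical usage sketch (not part of the original source): the GTID variant of the above,
// which only succeeds when both servers use Oracle GTID or MariaDB GTID. The keys are assumptions
// for illustration only.
//
//	if _, err := MoveBelowGTID(
//		&InstanceKey{Hostname: "replica1.example.com", Port: 3306},
//		&InstanceKey{Hostname: "replica2.example.com", Port: 3306},
//	); err != nil {
//		log.Errore(err)
//	}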
// moveSlavesViaGTID moves a list of slaves under another instance via GTID, returning those slaves
// that could not be moved (do not use GTID)
func moveSlavesViaGTID(slaves [](*Instance), other *Instance) (movedSlaves [](*Instance), unmovedSlaves [](*Instance), err error, errs []error) {
slaves = RemoveInstance(slaves, &other.Key)
if len(slaves) == 0 {
// Nothing to do
return movedSlaves, unmovedSlaves, nil, errs
}
log.Infof("Will move %+v slaves below %+v via GTID", len(slaves), other.Key)
barrier := make(chan *InstanceKey)
slaveMutex := make(chan bool, 1)
for _, slave := range slaves {
slave := slave
// Parallelize repoints
go func() {
defer func() { barrier <- &slave.Key }()
ExecuteOnTopology(func() {
var slaveErr error
if _, _, canMove := canMoveViaGTID(slave, other); canMove {
slave, slaveErr = moveInstanceBelowViaGTID(slave, other)
} else {
slaveErr = fmt.Errorf("%+v cannot move below %+v via GTID", slave.Key, other.Key)
}
func() {
// Instantaneous mutex.
slaveMutex <- true
defer func() { <-slaveMutex }()
if slaveErr == nil {
movedSlaves = append(movedSlaves, slave)
} else {
unmovedSlaves = append(unmovedSlaves, slave)
errs = append(errs, slaveErr)
}
}()
})
}()
}
for range slaves {
<-barrier
}
if len(errs) == len(slaves) {
// All returned with error
return movedSlaves, unmovedSlaves, fmt.Errorf("moveSlavesViaGTID: Error on all %+v operations", len(errs)), errs
}
AuditOperation("move-slaves-gtid", &other.Key, fmt.Sprintf("moved %d/%d slaves below %+v via GTID", len(movedSlaves), len(slaves), other.Key))
return movedSlaves, unmovedSlaves, err, errs
}
// MoveSlavesGTID will (attempt to) move all slaves of given master below given instance.
func MoveSlavesGTID(masterKey *InstanceKey, belowKey *InstanceKey, pattern string) (movedSlaves [](*Instance), unmovedSlaves [](*Instance), err error, errs []error) {
belowInstance, err := ReadTopologyInstance(belowKey)
if err != nil {
// Can't access "below" ==> can't move slaves beneath it
return movedSlaves, unmovedSlaves, err, errs
}
// slaves involved
slaves, err := ReadSlaveInstancesIncludingBinlogServerSubSlaves(masterKey)
if err != nil {
return movedSlaves, unmovedSlaves, err, errs
}
slaves = filterInstancesByPattern(slaves, pattern)
movedSlaves, unmovedSlaves, err, errs = moveSlavesViaGTID(slaves, belowInstance)
if err != nil {
log.Errore(err)
}
if len(unmovedSlaves) > 0 {
err = fmt.Errorf("MoveSlavesGTID: only moved %d out of %d slaves of %+v; error is: %+v", len(movedSlaves), len(slaves), *masterKey, err)
}
return movedSlaves, unmovedSlaves, err, errs
}
// Repoint connects a slave to a master using its exact same executing coordinates.
// The given masterKey can be nil, in which case the existing master is used.
// Two use cases:
// - masterKey is nil: use case is corrupted relay logs on slave
// - masterKey is not nil: using Binlog servers (coordinates remain the same)
func Repoint(instanceKey *InstanceKey, masterKey *InstanceKey, gtidHint OperationGTIDHint) (*Instance, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, err
}
if !instance.IsSlave() {
return instance, fmt.Errorf("instance is not a slave: %+v", *instanceKey)
}
if masterKey == nil {
masterKey = &instance.MasterKey
}
// With repoint we *prefer* the master to be alive, but we don't strictly require it.
// The use case for the master being alive is with hostname-resolve or hostname-unresolve: asking the slave
// to reconnect to its same master while changing the MASTER_HOST in CHANGE MASTER TO due to DNS changes etc.
master, err := ReadTopologyInstance(masterKey)
masterIsAccessible := (err == nil)
if !masterIsAccessible {
master, _, err = ReadInstance(masterKey)
if err != nil {
return instance, err
}
}
if canReplicate, err := instance.CanReplicateFrom(master); !canReplicate {
return instance, err
}
// If repointing onto a binlog server, check that it is sufficiently up to date
if master.IsBinlogServer() {
// "Repoint" operation trusts the user. But only so much. Repoiting to a binlog server which is not yet there is strictly wrong.
if !instance.ExecBinlogCoordinates.SmallerThanOrEquals(&master.SelfBinlogCoordinates) {
return instance, fmt.Errorf("repoint: binlog server %+v is not sufficiently up to date to repoint %+v below it", *masterKey, *instanceKey)
}
}
log.Infof("Will repoint %+v to master %+v", *instanceKey, *masterKey)
if maintenanceToken, merr := BeginMaintenance(instanceKey, GetMaintenanceOwner(), "repoint"); merr != nil {
err = fmt.Errorf("Cannot begin maintenance on %+v", *instanceKey)
goto Cleanup
} else {
defer EndMaintenance(maintenanceToken)
}
instance, err = StopSlave(instanceKey)
if err != nil {
goto Cleanup
}
// See above, we are relaxed about the master being accessible/inaccessible.
// If accessible, we wish to do hostname-unresolve. If inaccessible, we can skip the test and not fail the
// ChangeMasterTo operation. This is why we pass "!masterIsAccessible" below.
if instance.ExecBinlogCoordinates.IsEmpty() {
instance.ExecBinlogCoordinates.LogFile = "orchestrator-unknown-log-file"
}
instance, err = ChangeMasterTo(instanceKey, masterKey, &instance.ExecBinlogCoordinates, !masterIsAccessible, gtidHint)
if err != nil {
goto Cleanup
}
Cleanup:
instance, _ = StartSlave(instanceKey)
if err != nil {
return instance, log.Errore(err)
}
// and we're done (pending deferred functions)
AuditOperation("repoint", instanceKey, fmt.Sprintf("slave %+v repointed to master: %+v", *instanceKey, *masterKey))
return instance, err
}
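// A hypothetical usage sketch (not part of the original source) covering the two Repoint use cases
// described above; the keys are assumptions for illustration only.
//
//	key := &InstanceKey{Hostname: "replica1.example.com", Port: 3306}
//	// Corrupted relay logs: reconnect to the existing master at the current execution coordinates.
//	if _, err := Repoint(key, nil, GTIDHintNeutral); err != nil {
//		log.Errore(err)
//	}
//	// Binlog server: point the replica at a binlog server carrying the same coordinates.
//	_, _ = Repoint(key, &InstanceKey{Hostname: "binlog-server.example.com", Port: 3306}, GTIDHintNeutral)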
// RepointTo repoints a list of slaves onto another master.
// Binlog Server is the major use case
func RepointTo(slaves [](*Instance), belowKey *InstanceKey) ([](*Instance), error, []error) {
res := [](*Instance){}
errs := []error{}
slaves = RemoveInstance(slaves, belowKey)
if len(slaves) == 0 {
// Nothing to do
return res, nil, errs
}
if belowKey == nil {
return res, log.Errorf("RepointTo received nil belowKey"), errs
}
log.Infof("Will repoint %+v slaves below %+v", len(slaves), *belowKey)
barrier := make(chan *InstanceKey)
slaveMutex := make(chan bool, 1)
for _, slave := range slaves {
slave := slave
// Parallelize repoints
go func() {
defer func() { barrier <- &slave.Key }()
ExecuteOnTopology(func() {
slave, slaveErr := Repoint(&slave.Key, belowKey, GTIDHintNeutral)
func() {
// Instantaneous mutex.
slaveMutex <- true
defer func() { <-slaveMutex }()
if slaveErr == nil {
res = append(res, slave)
} else {
errs = append(errs, slaveErr)
}
}()
})
}()
}
for range slaves {
<-barrier
}
if len(errs) == len(slaves) {
// All returned with error
return res, log.Error("Error on all operations"), errs
}
AuditOperation("repoint-to", belowKey, fmt.Sprintf("repointed %d/%d slaves to %+v", len(res), len(slaves), *belowKey))
return res, nil, errs
}
// RepointSlavesTo repoints slaves of a given instance (possibly filtered) onto another master.
// Binlog Server is the major use case
func RepointSlavesTo(instanceKey *InstanceKey, pattern string, belowKey *InstanceKey) ([](*Instance), error, []error) {
res := [](*Instance){}
errs := []error{}
slaves, err := ReadSlaveInstances(instanceKey)
if err != nil {
return res, err, errs
}
slaves = RemoveInstance(slaves, belowKey)
slaves = filterInstancesByPattern(slaves, pattern)
if len(slaves) == 0 {
// Nothing to do
return res, nil, errs
}
if belowKey == nil {
// Default to existing master. All slaves are of the same master, hence just pick one.
belowKey = &slaves[0].MasterKey
}
log.Infof("Will repoint slaves of %+v to %+v", *instanceKey, *belowKey)
return RepointTo(slaves, belowKey)
}
// RepointSlaves repoints all slaves of a given instance onto its existing master.
func RepointSlaves(instanceKey *InstanceKey, pattern string) ([](*Instance), error, []error) {
return RepointSlavesTo(instanceKey, pattern, nil)
}
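// A hypothetical usage sketch (not part of the original source): repointing all slaves of a
// binlog server onto another binlog server. The keys and the empty pattern are assumptions for
// illustration only.
//
//	repointed, err, errs := RepointSlavesTo(
//		&InstanceKey{Hostname: "binlog-server-1.example.com", Port: 3306},
//		"", // no filtering pattern
//		&InstanceKey{Hostname: "binlog-server-2.example.com", Port: 3306},
//	)
//	log.Infof("repointed %d slaves; err=%+v, individual errors=%d", len(repointed), err, len(errs))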
// MakeCoMaster will attempt to make an instance co-master with its master, by making its master a slave of it.
// This only works out if the master is not replicating; the master must not have a known master (though it may have an unknown one).
func MakeCoMaster(instanceKey *InstanceKey) (*Instance, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, err
}
if canMove, merr := instance.CanMove(); !canMove {
return instance, merr
}
master, err := GetInstanceMaster(instance)
if err != nil {
return instance, err
}
log.Debugf("Will check whether %+v's master (%+v) can become its co-master", instance.Key, master.Key)
if canMove, merr := master.CanMoveAsCoMaster(); !canMove {
return instance, merr
}
if instanceKey.Equals(&master.MasterKey) {
return instance, fmt.Errorf("instance %+v is already co master of %+v", instance.Key, master.Key)
}
if !instance.ReadOnly {
return instance, fmt.Errorf("instance %+v is not read-only; first make it read-only before making it co-master", instance.Key)
}
if master.IsCoMaster {
// We allow breaking of an existing co-master replication. Here's the breakdown:
// Ideally, this would not be allowed, and we would first require the user to RESET SLAVE on 'master'
// prior to making it participate as co-master with our 'instance'.
// However there's the problem that upon RESET SLAVE we lose the replication's user/password info.
// Thus, we come up with the following rule:
// If S replicates from M1, and M1<->M2 are co masters, we allow S to become co-master of M1 (S<->M1) if:
// - M1 is writeable
// - M2 is read-only or is unreachable/invalid
// - S is read-only
// And so we will be replacing one read-only co-master with another.
otherCoMaster, found, _ := ReadInstance(&master.MasterKey)
if found && otherCoMaster.IsLastCheckValid && !otherCoMaster.ReadOnly {
return instance, fmt.Errorf("master %+v is already co-master with %+v, and %+v is alive, and not read-only; cowardly refusing to demote it. Please set it as read-only beforehand", master.Key, otherCoMaster.Key, otherCoMaster.Key)
}
// OK, good to go.
} else if _, found, _ := ReadInstance(&master.MasterKey); found {
return instance, fmt.Errorf("%+v is not a real master; it replicates from: %+v", master.Key, master.MasterKey)
}
if canReplicate, err := master.CanReplicateFrom(instance); !canReplicate {
return instance, err
}
log.Infof("Will make %+v co-master of %+v", instanceKey, master.Key)
if maintenanceToken, merr := BeginMaintenance(instanceKey, GetMaintenanceOwner(), fmt.Sprintf("make co-master of %+v", master.Key)); merr != nil {
err = fmt.Errorf("Cannot begin maintenance on %+v", *instanceKey)
goto Cleanup
} else {
defer EndMaintenance(maintenanceToken)
}
if maintenanceToken, merr := BeginMaintenance(&master.Key, GetMaintenanceOwner(), fmt.Sprintf("%+v turns into co-master of this", *instanceKey)); merr != nil {
err = fmt.Errorf("Cannot begin maintenance on %+v", master.Key)
goto Cleanup
} else {
defer EndMaintenance(maintenanceToken)
}
// the coMaster used to be merely a slave. Just point master into *some* position
// within coMaster...
master, err = StopSlave(&master.Key)
if err != nil {
goto Cleanup
}
master, err = ChangeMasterTo(&master.Key, instanceKey, &instance.SelfBinlogCoordinates, false, GTIDHintNeutral)
if err != nil {
goto Cleanup
}
Cleanup:
master, _ = StartSlave(&master.Key)
if err != nil {
return instance, log.Errore(err)
}
// and we're done (pending deferred functions)
AuditOperation("make-co-master", instanceKey, fmt.Sprintf("%+v made co-master of %+v", *instanceKey, master.Key))
return instance, err
}
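// A hypothetical usage sketch (not part of the original source): promoting a read-only replica
// into active-active (co-master) replication with its master. The key is an assumption for
// illustration only.
//
//	key := &InstanceKey{Hostname: "replica1.example.com", Port: 3306}
//	// The instance must be read-only beforehand, per the checks above.
//	if _, err := MakeCoMaster(key); err != nil {
//		log.Errore(err)
//	}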
// ResetSlaveOperation will reset a slave
func ResetSlaveOperation(instanceKey *InstanceKey) (*Instance, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, err
}
log.Infof("Will reset slave on %+v", instanceKey)
if maintenanceToken, merr := BeginMaintenance(instanceKey, GetMaintenanceOwner(), "reset slave"); merr != nil {
err = fmt.Errorf("Cannot begin maintenance on %+v", *instanceKey)
goto Cleanup
} else {
defer EndMaintenance(maintenanceToken)
}
if instance.IsSlave() {
instance, err = StopSlave(instanceKey)
if err != nil {
goto Cleanup
}
}
instance, err = ResetSlave(instanceKey)
if err != nil {
goto Cleanup
}
Cleanup:
instance, _ = StartSlave(instanceKey)
if err != nil {
return instance, log.Errore(err)
}
// and we're done (pending deferred functions)
AuditOperation("reset-slave", instanceKey, fmt.Sprintf("%+v replication reset", *instanceKey))
return instance, err
}
// DetachSlaveOperation will detach a slave from its master by forcibly corrupting its replication coordinates
func DetachSlaveOperation(instanceKey *InstanceKey) (*Instance, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, err
}
log.Infof("Will detach %+v", instanceKey)
if maintenanceToken, merr := BeginMaintenance(instanceKey, GetMaintenanceOwner(), "detach slave"); merr != nil {
err = fmt.Errorf("Cannot begin maintenance on %+v", *instanceKey)
goto Cleanup
} else {
defer EndMaintenance(maintenanceToken)
}
if instance.IsSlave() {
instance, err = StopSlave(instanceKey)
if err != nil {
goto Cleanup
}
}
instance, err = DetachSlave(instanceKey)
if err != nil {
goto Cleanup
}
Cleanup:
instance, _ = StartSlave(instanceKey)
if err != nil {
return instance, log.Errore(err)
}
// and we're done (pending deferred functions)
AuditOperation("detach-slave", instanceKey, fmt.Sprintf("%+v replication detached", *instanceKey))
return instance, err
}
// ReattachSlaveOperation will reattach a slave to its master by restoring its previously corrupted replication coordinates
func ReattachSlaveOperation(instanceKey *InstanceKey) (*Instance, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, err
}
log.Infof("Will reattach %+v", instanceKey)
if maintenanceToken, merr := BeginMaintenance(instanceKey, GetMaintenanceOwner(), "detach slave"); merr != nil {
err = fmt.Errorf("Cannot begin maintenance on %+v", *instanceKey)
goto Cleanup
} else {
defer EndMaintenance(maintenanceToken)
}
if instance.IsSlave() {
instance, err = StopSlave(instanceKey)
if err != nil {
goto Cleanup
}
}
instance, err = ReattachSlave(instanceKey)
if err != nil {
goto Cleanup
}
Cleanup:
instance, _ = StartSlave(instanceKey)
if err != nil {
return instance, log.Errore(err)
}
// and we're done (pending deferred functions)
AuditOperation("reattach-slave", instanceKey, fmt.Sprintf("%+v replication reattached", *instanceKey))
return instance, err
}
// DetachSlaveMasterHost detaches a slave from its master by corrupting the Master_Host (in such a way that it is reversible)
func DetachSlaveMasterHost(instanceKey *InstanceKey) (*Instance, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, err
}
if !instance.IsSlave() {
return instance, fmt.Errorf("instance is not a slave: %+v", *instanceKey)
}
if instance.MasterKey.IsDetached() {
return instance, fmt.Errorf("instance already detached: %+v", *instanceKey)
}
detachedMasterKey := instance.MasterKey.DetachedKey()
log.Infof("Will detach master host on %+v. Detached key is %+v", *instanceKey, *detachedMasterKey)
if maintenanceToken, merr := BeginMaintenance(instanceKey, GetMaintenanceOwner(), "detach-slave-master-host"); merr != nil {
err = fmt.Errorf("Cannot begin maintenance on %+v", *instanceKey)
goto Cleanup
} else {
defer EndMaintenance(maintenanceToken)
}
instance, err = StopSlave(instanceKey)
if err != nil {
goto Cleanup
}
instance, err = ChangeMasterTo(instanceKey, detachedMasterKey, &instance.ExecBinlogCoordinates, true, GTIDHintNeutral)
if err != nil {
goto Cleanup
}
Cleanup:
instance, _ = StartSlave(instanceKey)
if err != nil {
return instance, log.Errore(err)
}
// and we're done (pending deferred functions)
AuditOperation("repoint", instanceKey, fmt.Sprintf("slave %+v detached from master into %+v", *instanceKey, *detachedMasterKey))
return instance, err
}
// ReattachSlaveMasterHost reattaches a slave back onto its master by undoing a DetachSlaveMasterHost operation
func ReattachSlaveMasterHost(instanceKey *InstanceKey) (*Instance, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, err
}
if !instance.IsSlave() {
return instance, fmt.Errorf("instance is not a slave: %+v", *instanceKey)
}
if !instance.MasterKey.IsDetached() {
return instance, fmt.Errorf("instance does not seem to be detached: %+v", *instanceKey)
}
reattachedMasterKey := instance.MasterKey.ReattachedKey()
log.Infof("Will reattach master host on %+v. Reattached key is %+v", *instanceKey, *reattachedMasterKey)
if maintenanceToken, merr := BeginMaintenance(instanceKey, GetMaintenanceOwner(), "reattach-slave-master-host"); merr != nil {
err = fmt.Errorf("Cannot begin maintenance on %+v", *instanceKey)
goto Cleanup
} else {
defer EndMaintenance(maintenanceToken)
}
instance, err = StopSlave(instanceKey)
if err != nil {
goto Cleanup
}
instance, err = ChangeMasterTo(instanceKey, reattachedMasterKey, &instance.ExecBinlogCoordinates, true, GTIDHintNeutral)
if err != nil {
goto Cleanup
}
Cleanup:
instance, _ = StartSlave(instanceKey)
if err != nil {
return instance, log.Errore(err)
}
// and we're done (pending deferred functions)
AuditOperation("repoint", instanceKey, fmt.Sprintf("slave %+v reattached to master $+v", *instanceKey, *reattachedMasterKey))
return instance, err
}
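// A hypothetical usage sketch (not part of the original source): temporarily detaching a replica
// by corrupting its Master_Host in a reversible way, then undoing it. The key is an assumption
// for illustration only.
//
//	key := &InstanceKey{Hostname: "replica1.example.com", Port: 3306}
//	if _, err := DetachSlaveMasterHost(key); err != nil {
//		log.Errore(err)
//	}
//	// ... later, undo the detachment:
//	if _, err := ReattachSlaveMasterHost(key); err != nil {
//		log.Errore(err)
//	}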
// EnableGTID will attempt to enable GTID-mode (either Oracle or MariaDB)
func EnableGTID(instanceKey *InstanceKey) (*Instance, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, err
}
if instance.UsingGTID() {
return instance, fmt.Errorf("%+v already uses GTID", *instanceKey)
}
log.Infof("Will attempt to enable GTID on %+v", *instanceKey)
instance, err = Repoint(instanceKey, nil, GTIDHintForce)
if err != nil {
return instance, err
}
if !instance.UsingGTID() {
return instance, fmt.Errorf("Cannot enable GTID on %+v", *instanceKey)
}
AuditOperation("enable-gtid", instanceKey, fmt.Sprintf("enabled GTID on %+v", *instanceKey))
return instance, err
}
// DisableGTID will attempt to disable GTID-mode (either Oracle or MariaDB) and revert to binlog file:pos replication
func DisableGTID(instanceKey *InstanceKey) (*Instance, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, err
}
if !instance.UsingGTID() {
return instance, fmt.Errorf("%+v is not using GTID", *instanceKey)
}
log.Infof("Will attempt to disable GTID on %+v", *instanceKey)
instance, err = Repoint(instanceKey, nil, GTIDHintDeny)
if err != nil {
return instance, err
}
if instance.UsingGTID() {
return instance, fmt.Errorf("Cannot disable GTID on %+v", *instanceKey)
}
AuditOperation("disable-gtid", instanceKey, fmt.Sprintf("disabled GTID on %+v", *instanceKey))
return instance, err
}
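// A hypothetical usage sketch (not part of the original source): toggling a replica between
// GTID-based and file:pos replication via the Repoint-based helpers above. The key is an
// assumption for illustration only.
//
//	key := &InstanceKey{Hostname: "replica1.example.com", Port: 3306}
//	if _, err := EnableGTID(key); err != nil {
//		log.Errore(err)
//	}
//	// and to revert back to binlog file:pos replication:
//	if _, err := DisableGTID(key); err != nil {
//		log.Errore(err)
//	}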
// ResetMasterGTIDOperation will issue a safe RESET MASTER on a slave that replicates via GTID:
// It will make sure the gtid_purged set matches the executed set value as read just before the RESET.
// This will enable new slaves to be attached to the given instance without complaints about missing/purged entries.
// This function requires that the instance does not have slaves.
func ResetMasterGTIDOperation(instanceKey *InstanceKey, removeSelfUUID bool, uuidToRemove string) (*Instance, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, err
}
if !instance.UsingOracleGTID {
return instance, log.Errorf("reset-master-gtid requested for %+v but it is not using oracle-gtid", *instanceKey)
}
if len(instance.SlaveHosts) > 0 {
return instance, log.Errorf("reset-master-gtid will not operate on %+v because it has %+v slaves. Expecting no slaves", *instanceKey, len(instance.SlaveHosts))
}
log.Infof("Will reset master on %+v", instanceKey)
var oracleGtidSet *OracleGtidSet
if maintenanceToken, merr := BeginMaintenance(instanceKey, GetMaintenanceOwner(), "reset-master-gtid"); merr != nil {
err = fmt.Errorf("Cannot begin maintenance on %+v", *instanceKey)
goto Cleanup
} else {
defer EndMaintenance(maintenanceToken)
}
if instance.IsSlave() {
instance, err = StopSlave(instanceKey)
if err != nil {
goto Cleanup
}
}
oracleGtidSet, err = ParseGtidSet(instance.ExecutedGtidSet)
if err != nil {
goto Cleanup
}
if removeSelfUUID {
uuidToRemove = instance.ServerUUID
}
if uuidToRemove != "" {
removed := oracleGtidSet.RemoveUUID(uuidToRemove)
if removed {
log.Debugf("Will remove UUID %s", uuidToRemove)
} else {
log.Debugf("UUID %s not found", uuidToRemove)
}
}
instance, err = ResetMaster(instanceKey)
if err != nil {
goto Cleanup
}
err = setGTIDPurged(instance, oracleGtidSet.String())
if err != nil {
goto Cleanup
}
Cleanup:
instance, _ = StartSlave(instanceKey)
if err != nil {
return instance, log.Errore(err)
}
// and we're done (pending deferred functions)
AuditOperation("reset-master-gtid", instanceKey, fmt.Sprintf("%+v master reset", *instanceKey))
return instance, err
}
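// A hypothetical usage sketch (not part of the original source): issuing a safe RESET MASTER on a
// GTID-enabled server that has no slaves, removing its own UUID from the purged set. The key is an
// assumption for illustration only.
//
//	key := &InstanceKey{Hostname: "replica1.example.com", Port: 3306}
//	if _, err := ResetMasterGTIDOperation(key, true, ""); err != nil {
//		log.Errore(err)
//	}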
// FindLastPseudoGTIDEntry will search an instance's binary logs or relay logs for the last pseudo-GTID entry,
// and return found coordinates as well as entry text
func FindLastPseudoGTIDEntry(instance *Instance, recordedInstanceRelayLogCoordinates BinlogCoordinates, maxBinlogCoordinates *BinlogCoordinates, exhaustiveSearch bool, expectedBinlogFormat *string) (*BinlogCoordinates, string, error) {
var instancePseudoGtidText string
var instancePseudoGtidCoordinates *BinlogCoordinates
var err error
if instance.LogBinEnabled && instance.LogSlaveUpdatesEnabled && (expectedBinlogFormat == nil || instance.Binlog_format == *expectedBinlogFormat) {
// Well no need to search this instance's binary logs if it doesn't have any...
// With regard to log-slave-updates, some edge cases are possible, like having this instance's log-slave-updates
// enabled/disabled (of course having restarted it).
// The approach is not to take chances. If log-slave-updates is disabled, fail and go for relay-logs.
// If log-slave-updates was just enabled then possibly no pseudo-gtid is found, and so again we will go
// for relay logs.
// Also, if the master has STATEMENT binlog format and the slave has ROW binlog format, then comparing binlog entries would surely fail if based on the slave's binary logs.
// Instead, we revert to the relay logs.
instancePseudoGtidCoordinates, instancePseudoGtidText, err = getLastPseudoGTIDEntryInInstance(instance, maxBinlogCoordinates, exhaustiveSearch)
}
if err != nil || instancePseudoGtidCoordinates == nil {
// Unable to find pseudo GTID in binary logs.
// Then MAYBE we are lucky enough (chances are we are, if this slave did not crash) that we can
// extract the Pseudo GTID entry from the last (current) relay log file.
instancePseudoGtidCoordinates, instancePseudoGtidText, err = getLastPseudoGTIDEntryInRelayLogs(instance, recordedInstanceRelayLogCoordinates, exhaustiveSearch)
}
return instancePseudoGtidCoordinates, instancePseudoGtidText, err
}
// CorrelateBinlogCoordinates correlates the position of instance (up to the given binlogCoordinates, or its current position when nil)
// with a matching position on otherInstance, using Pseudo-GTID entries.
func CorrelateBinlogCoordinates(instance *Instance, binlogCoordinates *BinlogCoordinates, otherInstance *Instance) (*BinlogCoordinates, int, error) {
// We record the relay log coordinates just after the instance stopped since the coordinates can change upon
// a FLUSH LOGS/FLUSH RELAY LOGS (or a START SLAVE, though that's an altogether different problem) etc.
// We want to be on the safe side; we don't utterly trust that we are the only ones playing with the instance.
recordedInstanceRelayLogCoordinates := instance.RelaylogCoordinates
instancePseudoGtidCoordinates, instancePseudoGtidText, err := FindLastPseudoGTIDEntry(instance, recordedInstanceRelayLogCoordinates, binlogCoordinates, true, &otherInstance.Binlog_format)
if err != nil {
return nil, 0, err
}
entriesMonotonic := (config.Config.PseudoGTIDMonotonicHint != "") && strings.Contains(instancePseudoGtidText, config.Config.PseudoGTIDMonotonicHint)
otherInstancePseudoGtidCoordinates, err := SearchEntryInInstanceBinlogs(otherInstance, instancePseudoGtidText, entriesMonotonic)
if err != nil {
return nil, 0, err
}
// We've found a match: the latest Pseudo GTID position within instance and its identical twin in otherInstance
// We now iterate the events in both, up to the completion of events in instance (recall that we looked for
// the last entry in instance, hence, assuming pseudo GTID entries are frequent, the number of entries to read
// from instance is not large)
// The result of the iteration will be either:
// - bad conclusion that instance is actually more advanced than otherInstance (we find more entries in instance
// following the pseudo gtid than we can match in otherInstance), hence we cannot ask instance to replicate
// from otherInstance
// - good result: both instances are exactly in same shape (have replicated the exact same number of events since
// the last pseudo gtid). Since they are identical, it is easy to point instance into otherInstance.
// - good result: the first position within otherInstance where instance has not replicated yet. It is easy to point
// instance into otherInstance.
nextBinlogCoordinatesToMatch, countMatchedEvents, err := GetNextBinlogCoordinatesToMatch(instance, *instancePseudoGtidCoordinates,
recordedInstanceRelayLogCoordinates, binlogCoordinates, otherInstance, *otherInstancePseudoGtidCoordinates)
if err != nil {
return nil, 0, err
}
if countMatchedEvents == 0 {
err = fmt.Errorf("Unexpected: 0 events processed while iterating logs. Something went wrong; aborting. nextBinlogCoordinatesToMatch: %+v", nextBinlogCoordinatesToMatch)
return nil, 0, err
}
return nextBinlogCoordinatesToMatch, countMatchedEvents, nil
}
// MatchBelow will attempt moving the instance indicated by instanceKey below the one indicated by otherKey.
// The refactoring is based on matching binlog entries, not on "classic" position comparisons.
// The "other instance" could be a sibling of the moving instance or any of its ancestors. It may actually be
// a cousin of some sort (though unlikely). The only important thing is that the "other instance" is more
// advanced in replication than the given instance.
func MatchBelow(instanceKey, otherKey *InstanceKey, requireInstanceMaintenance bool) (*Instance, *BinlogCoordinates, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, nil, err
}
if config.Config.PseudoGTIDPattern == "" {
return instance, nil, fmt.Errorf("PseudoGTIDPattern not configured; cannot use Pseudo-GTID")
}
if instanceKey.Equals(otherKey) {
return instance, nil, fmt.Errorf("MatchBelow: attempt to match an instance below itself %+v", *instanceKey)
}
otherInstance, err := ReadTopologyInstance(otherKey)
if err != nil {
return instance, nil, err
}
rinstance, _, _ := ReadInstance(&instance.Key)
if canMove, merr := rinstance.CanMoveViaMatch(); !canMove {
return instance, nil, merr
}
if canReplicate, err := instance.CanReplicateFrom(otherInstance); !canReplicate {
return instance, nil, err
}
var nextBinlogCoordinatesToMatch *BinlogCoordinates
var countMatchedEvents int
if otherInstance.IsBinlogServer() {
// A Binlog Server does not do all the SHOW BINLOG EVENTS stuff
err = fmt.Errorf("Cannot use PseudoGTID with Binlog Server %+v", otherInstance.Key)
goto Cleanup
}
log.Infof("Will match %+v below %+v", *instanceKey, *otherKey)
if requireInstanceMaintenance {
if maintenanceToken, merr := BeginMaintenance(instanceKey, GetMaintenanceOwner(), fmt.Sprintf("match below %+v", *otherKey)); merr != nil {
err = fmt.Errorf("Cannot begin maintenance on %+v", *instanceKey)
goto Cleanup
} else {
defer EndMaintenance(maintenanceToken)
}
}
log.Debugf("Stopping slave on %+v", *instanceKey)
instance, err = StopSlave(instanceKey)
if err != nil {
goto Cleanup
}
nextBinlogCoordinatesToMatch, countMatchedEvents, err = CorrelateBinlogCoordinates(instance, nil, otherInstance)
if countMatchedEvents == 0 {
err = fmt.Errorf("Unexpected: 0 events processed while iterating logs. Something went wrong; aborting. nextBinlogCoordinatesToMatch: %+v", nextBinlogCoordinatesToMatch)
goto Cleanup
}
log.Debugf("%+v will match below %+v at %+v; validated events: %d", *instanceKey, *otherKey, *nextBinlogCoordinatesToMatch, countMatchedEvents)
// Drum roll......
instance, err = ChangeMasterTo(instanceKey, otherKey, nextBinlogCoordinatesToMatch, false, GTIDHintDeny)
if err != nil {
goto Cleanup
}
Cleanup:
instance, _ = StartSlave(instanceKey)
if err != nil {
return instance, nextBinlogCoordinatesToMatch, log.Errore(err)
}
// and we're done (pending deferred functions)
AuditOperation("match-below", instanceKey, fmt.Sprintf("matched %+v below %+v", *instanceKey, *otherKey))
return instance, nextBinlogCoordinatesToMatch, err
}
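// A hypothetical usage sketch (not part of the original source): matching a replica below a more
// advanced server via Pseudo-GTID, requesting maintenance locks for the duration. The keys are
// assumptions for illustration only.
//
//	instanceKey := &InstanceKey{Hostname: "replica1.example.com", Port: 3306}
//	otherKey := &InstanceKey{Hostname: "replica2.example.com", Port: 3306}
//	matched, coordinates, err := MatchBelow(instanceKey, otherKey, true)
//	if err == nil {
//		log.Infof("%+v now replicates from %+v at %+v", matched.Key, *otherKey, *coordinates)
//	}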
// RematchSlave will re-match a slave to its master, using pseudo-gtid
func RematchSlave(instanceKey *InstanceKey, requireInstanceMaintenance bool) (*Instance, *BinlogCoordinates, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, nil, err
}
masterInstance, found, err := ReadInstance(&instance.MasterKey)
if err != nil || !found {
return instance, nil, err
}
return MatchBelow(instanceKey, &masterInstance.Key, requireInstanceMaintenance)
}
// MakeMaster will take an instance, make all its siblings its slaves (via pseudo-GTID) and make it master
// (stop its replication, make it writeable).
func MakeMaster(instanceKey *InstanceKey) (*Instance, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, err
}
masterInstance, err := ReadTopologyInstance(&instance.MasterKey)
if err == nil {
// The master was read successfully, i.e. it is accessible; refuse to promote below a live or replicating master.
if masterInstance.IsSlave() {
return instance, fmt.Errorf("MakeMaster: instance's master %+v seems to be replicating", masterInstance.Key)
}
if masterInstance.IsLastCheckValid {
return instance, fmt.Errorf("MakeMaster: instance's master %+v seems to be accessible", masterInstance.Key)
}
}
// If err != nil this is "good": it means the master is inaccessible, so it's OK to do the promotion
if !instance.SQLThreadUpToDate() {
return instance, fmt.Errorf("MakeMaster: instance's SQL thread must be up-to-date with I/O thread for %+v", *instanceKey)
}
siblings, err := ReadSlaveInstances(&masterInstance.Key)
if err != nil {
return instance, err
}
for _, sibling := range siblings {
if instance.ExecBinlogCoordinates.SmallerThan(&sibling.ExecBinlogCoordinates) {
return instance, fmt.Errorf("MakeMaster: instance %+v has more advanced sibling: %+v", *instanceKey, sibling.Key)
}
}
if maintenanceToken, merr := BeginMaintenance(instanceKey, GetMaintenanceOwner(), fmt.Sprintf("siblings match below this: %+v", *instanceKey)); merr != nil {
err = fmt.Errorf("Cannot begin maintenance on %+v", *instanceKey)
goto Cleanup
} else {
defer EndMaintenance(maintenanceToken)
}
_, _, err, _ = MultiMatchBelow(siblings, instanceKey, false, nil)
if err != nil {
goto Cleanup
}
SetReadOnly(instanceKey, false)
Cleanup:
if err != nil {
return instance, log.Errore(err)
}
// and we're done (pending deferred functions)
AuditOperation("make-master", instanceKey, fmt.Sprintf("made master of %+v", *instanceKey))
return instance, err
}
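// Illustrative usage sketch (not part of the original source; deadMastersSlaveKey is a hypothetical key):
// promoting a slave whose master has become inaccessible.
//
//	if promoted, err := MakeMaster(&deadMastersSlaveKey); err == nil {
//		log.Infof("promoted %+v as master", promoted.Key)
//	} else {
//		log.Errore(err)
//	}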
// EnslaveSiblings is a convenience method for turning the siblings of a slave into its subordinates.
// This uses normal connected replication (does not utilize Pseudo-GTID)
func EnslaveSiblings(instanceKey *InstanceKey) (*Instance, int, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, 0, err
}
masterInstance, found, err := ReadInstance(&instance.MasterKey)
if err != nil || !found {
return instance, 0, err
}
siblings, err := ReadSlaveInstances(&masterInstance.Key)
if err != nil {
return instance, 0, err
}
enslavedSiblings := 0
for _, sibling := range siblings {
if _, err := MoveBelow(&sibling.Key, &instance.Key); err == nil {
enslavedSiblings++
}
}
return instance, enslavedSiblings, err
}
// EnslaveMaster will move an instance up the chain and cause its master to become its slave.
// It's almost a role change, just that other slaves of either 'instance' or its master are currently unaffected
// (they continue to replicate without change)
// Note that the master must itself be a slave; however the grandparent does not necessarily have to be reachable
// and can in fact be dead.
func EnslaveMaster(instanceKey *InstanceKey) (*Instance, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, err
}
masterInstance, found, err := ReadInstance(&instance.MasterKey)
if err != nil || !found {
return instance, err
}
log.Debugf("EnslaveMaster: will attempt making %+v enslave its master %+v, now resolved as %+v", *instanceKey, instance.MasterKey, masterInstance.Key)
if canReplicate, err := masterInstance.CanReplicateFrom(instance); !canReplicate {
return instance, err
}
// We begin
masterInstance, err = StopSlave(&masterInstance.Key)
if err != nil {
goto Cleanup
}
instance, err = StopSlave(&instance.Key)
if err != nil {
goto Cleanup
}
instance, err = StartSlaveUntilMasterCoordinates(&instance.Key, &masterInstance.SelfBinlogCoordinates)
if err != nil {
goto Cleanup
}
// instance and masterInstance are now at equal coordinates: instance has executed everything its master has written
// We skip name unresolve. It is OK if the master's master is dead, unreachable, or does not resolve properly.
// We just copy+paste info from the master.
// In particular, this is commonly called in DeadMaster recovery
instance, err = ChangeMasterTo(&instance.Key, &masterInstance.MasterKey, &masterInstance.ExecBinlogCoordinates, true, GTIDHintNeutral)
if err != nil {
goto Cleanup
}
// instance is now sibling of master
masterInstance, err = ChangeMasterTo(&masterInstance.Key, &instance.Key, &instance.SelfBinlogCoordinates, false, GTIDHintNeutral)
if err != nil {
goto Cleanup
}
// swap is done!
Cleanup:
instance, _ = StartSlave(&instance.Key)
masterInstance, _ = StartSlave(&masterInstance.Key)
if err != nil {
return instance, err
}
AuditOperation("enslave-master", instanceKey, fmt.Sprintf("enslaved master: %+v", masterInstance.Key))
return instance, err
}
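// Illustrative usage sketch (not part of the original source; slaveKey is a hypothetical key):
// swapping an instance with its master, e.g. as part of a planned role change.
//
//	if _, err := EnslaveMaster(&slaveKey); err != nil {
//		log.Errore(err)
//	}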
// MakeLocalMaster promotes a slave above its master, making it slave of its grandparent, while also enslaving its siblings.
// This serves as a convenience method to recover replication when a local master fails; the promoted instance is one of the
// failed master's slaves, and it must be the most advanced among its siblings.
// This method utilizes Pseudo GTID
func MakeLocalMaster(instanceKey *InstanceKey) (*Instance, error) {
instance, err := ReadTopologyInstance(instanceKey)
if err != nil {
return instance, err
}
masterInstance, found, err := ReadInstance(&instance.MasterKey)
if err != nil || !found {
return instance, err
}
grandparentInstance, err := ReadTopologyInstance(&masterInstance.MasterKey)
if err != nil {
return instance, err
}
siblings, err := ReadSlaveInstances(&masterInstance.Key)
if err != nil {
return instance, err
}
for _, sibling := range siblings {
if instance.ExecBinlogCoordinates.SmallerThan(&sibling.ExecBinlogCoordinates) {
return instance, fmt.Errorf("MakeMaster: instance %+v has more advanced sibling: %+v", *instanceKey, sibling.Key)
}
}
instance, err = StopSlaveNicely(instanceKey, 0)
if err != nil {
goto Cleanup
}
_, _, err = MatchBelow(instanceKey, &grandparentInstance.Key, true)
if err != nil {
goto Cleanup
}
_, _, err, _ = MultiMatchBelow(siblings, instanceKey, false, nil)
if err != nil {
goto Cleanup
}
Cleanup:
if err != nil {
return instance, log.Errore(err)
}
// and we're done (pending deferred functions)
AuditOperation("make-local-master", instanceKey, fmt.Sprintf("made master of %+v", *instanceKey))
return instance, err
}
// sortedSlaves returns the list of slaves of a given master, sorted by exec coordinates
// (most up-to-date slave first)
func sortedSlaves(masterKey *InstanceKey, shouldStopSlaves bool, includeBinlogServerSubSlaves bool) (slaves [](*Instance), err error) {
if includeBinlogServerSubSlaves {
slaves, err = ReadSlaveInstancesIncludingBinlogServerSubSlaves(masterKey)
} else {
slaves, err = ReadSlaveInstances(masterKey)
}
if err != nil {
return slaves, err
}
if len(slaves) == 0 {
return slaves, nil
}
if shouldStopSlaves {
log.Debugf("sortedSlaves: stopping %d slaves nicely", len(slaves))
slaves = StopSlavesNicely(slaves, time.Duration(config.Config.InstanceBulkOperationsWaitTimeoutSeconds)*time.Second)
}
slaves = RemoveNilInstances(slaves)
sort.Sort(sort.Reverse(InstancesByExecBinlogCoordinates(slaves)))
for _, slave := range slaves {
log.Debugf("- sorted slave: %+v %+v", slave.Key, slave.ExecBinlogCoordinates)
}
return slaves, err
}
// MultiMatchBelow will efficiently match multiple slaves below a given instance.
// It is assumed that all given slaves are siblings
func MultiMatchBelow(slaves [](*Instance), belowKey *InstanceKey, slavesAlreadyStopped bool, postponedFunctionsContainer *PostponedFunctionsContainer) ([](*Instance), *Instance, error, []error) {
res := [](*Instance){}
errs := []error{}
slaveMutex := make(chan bool, 1)
if config.Config.PseudoGTIDPattern == "" {
return res, nil, fmt.Errorf("PseudoGTIDPattern not configured; cannot use Pseudo-GTID"), errs
}
slaves = RemoveInstance(slaves, belowKey)
slaves = RemoveBinlogServerInstances(slaves)
for _, slave := range slaves {
if maintenanceToken, merr := BeginMaintenance(&slave.Key, GetMaintenanceOwner(), fmt.Sprintf("%+v match below %+v as part of MultiMatchBelow", slave.Key, *belowKey)); merr != nil {
errs = append(errs, fmt.Errorf("Cannot begin maintenance on %+v", slave.Key))
slaves = RemoveInstance(slaves, &slave.Key)
} else {
defer EndMaintenance(maintenanceToken)
}
}
belowInstance, err := ReadTopologyInstance(belowKey)
if err != nil {
// Can't access the server below which we need to match ==> can't move slaves
return res, belowInstance, err, errs
}
if belowInstance.IsBinlogServer() {
// A Binlog Server does not do all the SHOW BINLOG EVENTS stuff
err = fmt.Errorf("Cannot use PseudoGTID with Binlog Server %+v", belowInstance.Key)
return res, belowInstance, err, errs
}
// slaves involved
if len(slaves) == 0 {
return res, belowInstance, nil, errs
}
if !slavesAlreadyStopped {
log.Debugf("MultiMatchBelow: stopping %d slaves nicely", len(slaves))
// We want the slaves to have SQL thread up to date with IO thread.
// We will wait for them (up to a timeout) to do so.
slaves = StopSlavesNicely(slaves, time.Duration(config.Config.InstanceBulkOperationsWaitTimeoutSeconds)*time.Second)
}
slaves = RemoveNilInstances(slaves)
sort.Sort(sort.Reverse(InstancesByExecBinlogCoordinates(slaves)))
// Optimizations:
// Slaves which broke on the same Exec-coordinates can be handled in the exact same way:
// we only need to figure out one slave of each group/bucket of exec-coordinates; then apply the CHANGE MASTER TO
// on all its fellow members using the same coordinates.
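// For illustration only (hypothetical coordinates, not from the original source): if slaves A and B both
// stopped at mysql-bin.000012:345678 while slave C stopped at mysql-bin.000012:999999, then A and B share
// one bucket and C gets its own; MatchBelow runs once per bucket, and the resulting master coordinates are
// reused for every remaining member of that bucket via a plain CHANGE MASTER TO.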
slaveBuckets := make(map[BinlogCoordinates][](*Instance))
for _, slave := range slaves {
slave := slave
slaveBuckets[slave.ExecBinlogCoordinates] = append(slaveBuckets[slave.ExecBinlogCoordinates], slave)
}
log.Debugf("MultiMatchBelow: %d slaves merged into %d buckets", len(slaves), len(slaveBuckets))
for bucket, bucketSlaves := range slaveBuckets {
log.Debugf("+- bucket: %+v, %d slaves", bucket, len(bucketSlaves))
}
matchedSlaves := make(map[InstanceKey]bool)
bucketsBarrier := make(chan *BinlogCoordinates)
// Now go over the buckets, and try a single slave from each bucket
// (though if one results in an error, we synchronously, within that bucket, continue to the next slave in the bucket)
for execCoordinates, bucketSlaves := range slaveBuckets {
execCoordinates := execCoordinates
bucketSlaves := bucketSlaves
var bucketMatchedCoordinates *BinlogCoordinates
// Buckets concurrent
go func() {
// find coordinates for a single bucket based on a slave in said bucket
defer func() { bucketsBarrier <- &execCoordinates }()
func() {
for _, slave := range bucketSlaves {
slave := slave
var slaveErr error
var matchedCoordinates *BinlogCoordinates
log.Debugf("MultiMatchBelow: attempting slave %+v in bucket %+v", slave.Key, execCoordinates)
matchFunc := func() error {
ExecuteOnTopology(func() {
_, matchedCoordinates, slaveErr = MatchBelow(&slave.Key, &belowInstance.Key, false)
})
return nil
}
if postponedFunctionsContainer != nil &&
config.Config.PostponeSlaveRecoveryOnLagMinutes > 0 &&
slave.SQLDelay > config.Config.PostponeSlaveRecoveryOnLagMinutes*60 &&
len(bucketSlaves) == 1 {
// This slave is the only one in the bucket, AND it's lagging very much, AND
// we're configured to postpone operation on this slave so as not to delay everyone else.
(*postponedFunctionsContainer).AddPostponedFunction(matchFunc)
return
// We bail out and trust our invoker to later call upon this postponed function
}
matchFunc()
log.Debugf("MultiMatchBelow: match result: %+v, %+v", matchedCoordinates, slaveErr)
if slaveErr == nil {
// Success! We matched a slave of this bucket
func() {
// Instantaneous mutex.
slaveMutex <- true
defer func() { <-slaveMutex }()
bucketMatchedCoordinates = matchedCoordinates
matchedSlaves[slave.Key] = true
}()
log.Debugf("MultiMatchBelow: matched slave %+v in bucket %+v", slave.Key, execCoordinates)
return
}
// Got here? Error!
func() {
// Instantaneous mutex.
slaveMutex <- true
defer func() { <-slaveMutex }()
errs = append(errs, slaveErr)
}()
log.Errore(slaveErr)
// Failure: some unknown problem with bucket slave. Let's try the next one (continue loop)
}
}()
if bucketMatchedCoordinates == nil {
log.Errorf("MultiMatchBelow: Cannot match up %d slaves since their bucket %+v is failed", len(bucketSlaves), execCoordinates)
return
}
log.Debugf("MultiMatchBelow: bucket %+v coordinates are: %+v. Proceeding to match all bucket slaves", execCoordinates, *bucketMatchedCoordinates)
// At this point our bucket has a known, successfully matched slave.
// We don't wait for the other buckets -- we immediately work out all the other slaves in this bucket.
// (perhaps another bucket is busy matching a 24h delayed-replica; we definitely don't want to wait on that)
func() {
barrier := make(chan *InstanceKey)
// We point all this bucket's slaves into the same coordinates, concurrently
// We are already doing concurrent buckets; but for each bucket we also want to do concurrent slaves,
// otherwise one large bucket would make for sequential work...
for _, slave := range bucketSlaves {
slave := slave
go func() {
defer func() { barrier <- &slave.Key }()
var err error
if _, found := matchedSlaves[slave.Key]; found {
// Already matched this slave
return
}
log.Debugf("MultiMatchBelow: Will match up %+v to previously matched master coordinates %+v", slave.Key, *bucketMatchedCoordinates)
slaveMatchSuccess := false
ExecuteOnTopology(func() {
if _, err = ChangeMasterTo(&slave.Key, &belowInstance.Key, bucketMatchedCoordinates, false, GTIDHintDeny); err == nil {
StartSlave(&slave.Key)
slaveMatchSuccess = true
}
})
func() {
// Quickly update lists; the mutex is instantaneous
slaveMutex <- true
defer func() { <-slaveMutex }()
if slaveMatchSuccess {
matchedSlaves[slave.Key] = true
} else {
errs = append(errs, err)
log.Errorf("MultiMatchBelow: Cannot match up %+v: error is %+v", slave.Key, err)
}
}()
}()
}
for range bucketSlaves {
<-barrier
}
}()
}()
}
for range slaveBuckets {
<-bucketsBarrier
}
for _, slave := range slaves {
slave := slave
if _, found := matchedSlaves[slave.Key]; found {
res = append(res, slave)
}
}
return res, belowInstance, err, errs
}
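// Illustrative usage sketch (not part of the original source; siblings and designatedKey are hypothetical):
// matching a set of sibling slaves below a designated instance, with no postponement container.
//
//	matched, _, err, errs := MultiMatchBelow(siblings, &designatedKey, false, nil)
//	if err != nil {
//		log.Errorf("matched %d slaves; %d errors; last error: %+v", len(matched), len(errs), err)
//	}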
// MultiMatchSlaves will match (via pseudo-gtid) all slaves of given master below given instance.
func MultiMatchSlaves(masterKey *InstanceKey, belowKey *InstanceKey, pattern string) ([](*Instance), *Instance, error, []error) {
res := [](*Instance){}
errs := []error{}
belowInstance, err := ReadTopologyInstance(belowKey)
if err != nil {
// Can't access "below" ==> can't match slaves beneath it
return res, nil, err, errs
}
masterInstance, found, err := ReadInstance(masterKey)
if err != nil || !found {
return res, nil, err, errs
}
// See if we have a binlog server case (special handling):
binlogCase := false
if masterInstance.IsBinlogServer() && masterInstance.MasterKey.Equals(belowKey) {
// repoint-up
log.Debugf("MultiMatchSlaves: pointing slaves up from binlog server")
binlogCase = true
} else if belowInstance.IsBinlogServer() && belowInstance.MasterKey.Equals(masterKey) {
// repoint-down
log.Debugf("MultiMatchSlaves: pointing slaves down to binlog server")
binlogCase = true
} else if masterInstance.IsBinlogServer() && belowInstance.IsBinlogServer() && masterInstance.MasterKey.Equals(&belowInstance.MasterKey) {
// Both BLS, siblings
log.Debugf("MultiMatchSlaves: pointing slaves to binlong sibling")
binlogCase = true
}
if binlogCase {
slaves, err, errors := RepointSlavesTo(masterKey, pattern, belowKey)
// Bail out!
return slaves, masterInstance, err, errors
}
// Not binlog server
// slaves involved
slaves, err := ReadSlaveInstancesIncludingBinlogServerSubSlaves(masterKey)
if err != nil {
return res, belowInstance, err, errs
}
slaves = filterInstancesByPattern(slaves, pattern)
matchedSlaves, belowInstance, err, errs := MultiMatchBelow(slaves, &belowInstance.Key, false, nil)
if len(matchedSlaves) != len(slaves) {
err = fmt.Errorf("MultiMatchSlaves: only matched %d out of %d slaves of %+v; error is: %+v", len(matchedSlaves), len(slaves), *masterKey, err)
}
AuditOperation("multi-match-slaves", masterKey, fmt.Sprintf("matched %d slaves under %+v", len(matchedSlaves), *belowKey))
return matchedSlaves, belowInstance, err, errs
}
// MatchUp will move a slave up the replication chain, so that it becomes sibling of its master, via Pseudo-GTID
func MatchUp(instanceKey *InstanceKey, requireInstanceMaintenance bool) (*Instance, *BinlogCoordinates, error) {
instance, found, err := ReadInstance(instanceKey)
if err != nil || !found {
return nil, nil, err
}
if !instance.IsSlave() {
return instance, nil, fmt.Errorf("instance is not a slave: %+v", instanceKey)
}
master, found, err := ReadInstance(&instance.MasterKey)
if err != nil || !found {
return instance, nil, log.Errorf("Cannot get master for %+v. error=%+v", instance.Key, err)
}
if !master.IsSlave() {
return instance, nil, fmt.Errorf("master is not a slave itself: %+v", master.Key)
}
return MatchBelow(instanceKey, &master.MasterKey, requireInstanceMaintenance)
}
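// Illustrative usage sketch (not part of the original source; slaveKey is a hypothetical key):
// promoting a slave one level up, to become a sibling of its master, via Pseudo-GTID.
//
//	if _, _, err := MatchUp(&slaveKey, true); err != nil {
//		log.Errore(err)
//	}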
// MatchUpSlaves will move all slaves of given master up the replication chain,
// so that they become siblings of their master.
// This should be called when the local master dies, and all its slaves are to be resurrected via Pseudo-GTID
func MatchUpSlaves(masterKey *InstanceKey, pattern string) ([](*Instance), *Instance, error, []error) {
res := [](*Instance){}
errs := []error{}
masterInstance, found, err := ReadInstance(masterKey)
if err != nil || !found {
return res, nil, err, errs
}
return MultiMatchSlaves(masterKey, &masterInstance.MasterKey, pattern)
}
func isGenerallyValidAsBinlogSource(slave *Instance) bool {
if !slave.IsLastCheckValid {
// something wrong with this slave right now. We shouldn't hope to be able to promote it
return false
}
if !slave.LogBinEnabled {
return false
}
if !slave.LogSlaveUpdatesEnabled {
return false
}
return true
}
func isGenerallyValidAsCandidateSlave(slave *Instance) bool {
if !isGenerallyValidAsBinlogSource(slave) {
// not a valid binlog source (missing binary logs, missing log-slave-updates, or last check invalid)
return false
}
if slave.IsBinlogServer() {
// Can't regroup under a binlog server because it does not support pseudo-gtid related queries such as SHOW BINLOG EVENTS
return false
}
return true
}
// isValidAsCandidateMasterInBinlogServerTopology lets us know whether a given slave is generally
// valid for promotion to master.
func isValidAsCandidateMasterInBinlogServerTopology(slave *Instance) bool {
if !slave.IsLastCheckValid {
// something wrong with this slave right now. We shouldn't hope to be able to promote it
return false
}
if !slave.LogBinEnabled {
return false
}
if slave.LogSlaveUpdatesEnabled {
// That's right: we *disallow* log-slave-updates
return false
}
if slave.IsBinlogServer() {
return false
}
return true
}
func isBannedFromBeingCandidateSlave(slave *Instance) bool {
for _, filter := range config.Config.PromotionIgnoreHostnameFilters {
if matched, _ := regexp.MatchString(filter, slave.Key.Hostname); matched {
return true
}
}
return false
}
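// Illustrative configuration sketch (hypothetical values, not from the original source):
// PromotionIgnoreHostnameFilters is a list of regular expressions; a slave whose hostname matches
// any of them is never picked as a promotion candidate. For example, in the orchestrator JSON config:
//
//	"PromotionIgnoreHostnameFilters": ["^backup-.*", ".*-delayed$"]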
// GetCandidateSlave chooses the best slave to promote given a (possibly dead) master
func GetCandidateSlave(masterKey *InstanceKey, forRematchPurposes bool) (*Instance, [](*Instance), [](*Instance), [](*Instance), error) {
var candidateSlave *Instance
aheadSlaves := [](*Instance){}
equalSlaves := [](*Instance){}
laterSlaves := [](*Instance){}
slaves, err := sortedSlaves(masterKey, forRematchPurposes, false)
if err != nil {
return candidateSlave, aheadSlaves, equalSlaves, laterSlaves, err
}
if len(slaves) == 0 {
return candidateSlave, aheadSlaves, equalSlaves, laterSlaves, fmt.Errorf("No slaves found for %+v", *masterKey)
}
for _, slave := range slaves {
slave := slave
if isGenerallyValidAsCandidateSlave(slave) && !isBannedFromBeingCandidateSlave(slave) {
// this is the one
candidateSlave = slave
break
}
}
if candidateSlave == nil {
// Unable to find a candidate, so will not regroup.
// Pick a (single) slave which is not banned.
for _, slave := range slaves {
slave := slave
if !isBannedFromBeingCandidateSlave(slave) {
// this is the one
candidateSlave = slave
break
}
}
if candidateSlave != nil {
slaves = RemoveInstance(slaves, &candidateSlave.Key)
}
return candidateSlave, slaves, equalSlaves, laterSlaves, fmt.Errorf("GetCandidateSlave: no candidate slaves found %+v", *masterKey)
}
slaves = RemoveInstance(slaves, &candidateSlave.Key)
for _, slave := range slaves {
slave := slave
if slave.ExecBinlogCoordinates.SmallerThan(&candidateSlave.ExecBinlogCoordinates) {
laterSlaves = append(laterSlaves, slave)
} else if slave.ExecBinlogCoordinates.Equals(&candidateSlave.ExecBinlogCoordinates) {
equalSlaves = append(equalSlaves, slave)
} else {
aheadSlaves = append(aheadSlaves, slave)
}
}
log.Debugf("sortedSlaves: candidate: %+v, ahead: %d, equal: %d, late: %d", candidateSlave.Key, len(aheadSlaves), len(equalSlaves), len(laterSlaves))
return candidateSlave, aheadSlaves, equalSlaves, laterSlaves, nil
}
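// Illustrative usage sketch (not part of the original source; failedMasterKey is a hypothetical key):
// inspecting which slave would be chosen for promotion, and how its siblings compare to it.
//
//	candidate, ahead, equal, later, err := GetCandidateSlave(&failedMasterKey, true)
//	if err == nil {
//		log.Infof("candidate: %+v (ahead: %d, equal: %d, later: %d)", candidate.Key, len(ahead), len(equal), len(later))
//	}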
// GetCandidateSlaveOfBinlogServerTopology chooses the best slave to promote given a (possibly dead) master
func GetCandidateSlaveOfBinlogServerTopology(masterKey *InstanceKey) (candidateSlave *Instance, err error) {
slaves, err := sortedSlaves(masterKey, false, true)
if err != nil {
return candidateSlave, err
}
if len(slaves) == 0 {
return candidateSlave, fmt.Errorf("No slaves found for %+v", *masterKey)
}
for _, slave := range slaves {
slave := slave
if candidateSlave != nil {
break
}
if isValidAsCandidateMasterInBinlogServerTopology(slave) && !isBannedFromBeingCandidateSlave(slave) {
// this is the one
candidateSlave = slave
}
}
if candidateSlave != nil {
log.Debugf("GetCandidateSlaveOfBinlogServerTopology: returning %+v as candidate slave for %+v", candidateSlave.Key, *masterKey)
} else {
log.Debugf("GetCandidateSlaveOfBinlogServerTopology: no candidate slave found for %+v", *masterKey)
}
return candidateSlave, err
}
// RegroupSlavesPseudoGTID will choose a candidate slave of a given instance, and enslave its siblings using
// either simple CHANGE MASTER TO, where possible, or Pseudo-GTID
func RegroupSlavesPseudoGTID(masterKey *InstanceKey, returnSlaveEvenOnFailureToRegroup bool, onCandidateSlaveChosen func(*Instance), postponedFunctionsContainer *PostponedFunctionsContainer) ([](*Instance), [](*Instance), [](*Instance), *Instance, error) {
candidateSlave, aheadSlaves, equalSlaves, laterSlaves, err := GetCandidateSlave(masterKey, true)
if err != nil {
if !returnSlaveEvenOnFailureToRegroup {
candidateSlave = nil
}
return aheadSlaves, equalSlaves, laterSlaves, candidateSlave, err
}
if config.Config.PseudoGTIDPattern == "" {
return aheadSlaves, equalSlaves, laterSlaves, candidateSlave, fmt.Errorf("PseudoGTIDPattern not configured; cannot use Pseudo-GTID")
}
if onCandidateSlaveChosen != nil {
onCandidateSlaveChosen(candidateSlave)
}
log.Debugf("RegroupSlaves: working on %d equals slaves", len(equalSlaves))
barrier := make(chan *InstanceKey)
for _, slave := range equalSlaves {
slave := slave
// This slave has the exact same executing coordinates as the candidate slave. This slave
// is *extremely* easy to attach below the candidate slave!
go func() {
defer func() { barrier <- &candidateSlave.Key }()
ExecuteOnTopology(func() {
ChangeMasterTo(&slave.Key, &candidateSlave.Key, &candidateSlave.SelfBinlogCoordinates, false, GTIDHintDeny)
})
}()
}
for range equalSlaves {
<-barrier
}
log.Debugf("RegroupSlaves: multi matching %d later slaves", len(laterSlaves))
// As for the laterSlaves, we'll have to apply pseudo GTID
laterSlaves, instance, err, _ := MultiMatchBelow(laterSlaves, &candidateSlave.Key, true, postponedFunctionsContainer)
operatedSlaves := append(equalSlaves, candidateSlave)
operatedSlaves = append(operatedSlaves, laterSlaves...)
log.Debugf("RegroupSlaves: starting %d slaves", len(operatedSlaves))
barrier = make(chan *InstanceKey)
for _, slave := range operatedSlaves {
slave := slave
go func() {
defer func() { barrier <- &candidateSlave.Key }()
ExecuteOnTopology(func() {
StartSlave(&slave.Key)
})
}()
}
for range operatedSlaves {
<-barrier
}
log.Debugf("RegroupSlaves: done")
AuditOperation("regroup-slaves", masterKey, fmt.Sprintf("regrouped %+v slaves below %+v", len(operatedSlaves), *masterKey))
// aheadSlaves are lost (they were ahead in replication as compared to promoted slave)
return aheadSlaves, equalSlaves, laterSlaves, instance, err
}
func getMostUpToDateActiveBinlogServer(masterKey *InstanceKey) (mostAdvancedBinlogServer *Instance, binlogServerSlaves [](*Instance), err error) {
if binlogServerSlaves, err = ReadBinlogServerSlaveInstances(masterKey); err == nil && len(binlogServerSlaves) > 0 {
// Pick the most advanced binlog server that is good to go
for _, binlogServer := range binlogServerSlaves {
if binlogServer.IsLastCheckValid {
if mostAdvancedBinlogServer == nil {
mostAdvancedBinlogServer = binlogServer
}
if mostAdvancedBinlogServer.ExecBinlogCoordinates.SmallerThan(&binlogServer.ExecBinlogCoordinates) {
mostAdvancedBinlogServer = binlogServer
}
}
}
}
return mostAdvancedBinlogServer, binlogServerSlaves, err
}
// RegroupSlavesPseudoGTIDIncludingSubSlavesOfBinlogServers works in a mixed standard/binlog-server topology. This kind of topology shouldn't really exist,
// but life is hard: while transitioning binlog servers into your topology you may sometimes have to live with this hybrid solution.
func RegroupSlavesPseudoGTIDIncludingSubSlavesOfBinlogServers(masterKey *InstanceKey, returnSlaveEvenOnFailureToRegroup bool, onCandidateSlaveChosen func(*Instance), postponedFunctionsContainer *PostponedFunctionsContainer) ([](*Instance), [](*Instance), [](*Instance), *Instance, error) {
// First, handle binlog server issues:
func() error {
log.Debugf("RegroupSlavesIncludingSubSlavesOfBinlogServers: starting on slaves of %+v", *masterKey)
// Find the most up to date binlog server:
mostUpToDateBinlogServer, binlogServerSlaves, err := getMostUpToDateActiveBinlogServer(masterKey)
if err != nil {
return log.Errore(err)
}
if mostUpToDateBinlogServer == nil {
log.Debugf("RegroupSlavesIncludingSubSlavesOfBinlogServers: no binlog server replicates from %+v", *masterKey)
// No binlog server; proceed as normal
return nil
}
log.Debugf("RegroupSlavesIncludingSubSlavesOfBinlogServers: most up to date binlog server of %+v: %+v", *masterKey, mostUpToDateBinlogServer.Key)
// Find the most up to date candidate slave:
candidateSlave, _, _, _, err := GetCandidateSlave(masterKey, true)
if err != nil {
return log.Errore(err)
}
if candidateSlave == nil {
log.Debugf("RegroupSlavesIncludingSubSlavesOfBinlogServers: no candidate slave for %+v", *masterKey)
// Let the followup code handle that
return nil
}
log.Debugf("RegroupSlavesIncludingSubSlavesOfBinlogServers: candidate slave of %+v: %+v", *masterKey, candidateSlave.Key)
if candidateSlave.ExecBinlogCoordinates.SmallerThan(&mostUpToDateBinlogServer.ExecBinlogCoordinates) {
log.Debugf("RegroupSlavesIncludingSubSlavesOfBinlogServers: candidate slave %+v coordinates smaller than binlog server %+v", candidateSlave.Key, mostUpToDateBinlogServer.Key)
// Need to align under binlog server...
candidateSlave, err = Repoint(&candidateSlave.Key, &mostUpToDateBinlogServer.Key, GTIDHintDeny)
if err != nil {
return log.Errore(err)
}
log.Debugf("RegroupSlavesIncludingSubSlavesOfBinlogServers: repointed candidate slave %+v under binlog server %+v", candidateSlave.Key, mostUpToDateBinlogServer.Key)
candidateSlave, err = StartSlaveUntilMasterCoordinates(&candidateSlave.Key, &mostUpToDateBinlogServer.ExecBinlogCoordinates)
if err != nil {
return log.Errore(err)
}
log.Debugf("RegroupSlavesIncludingSubSlavesOfBinlogServers: aligned candidate slave %+v under binlog server %+v", candidateSlave.Key, mostUpToDateBinlogServer.Key)
// and move back
candidateSlave, err = Repoint(&candidateSlave.Key, masterKey, GTIDHintDeny)
if err != nil {
return log.Errore(err)
}
log.Debugf("RegroupSlavesIncludingSubSlavesOfBinlogServers: repointed candidate slave %+v under master %+v", candidateSlave.Key, *masterKey)
return nil
}
// Either because it _was_ like that, or we _made_ it so,
// the candidate slave is now at least as up to date as all binlog servers
for _, binlogServer := range binlogServerSlaves {
log.Debugf("RegroupSlavesIncludingSubSlavesOfBinlogServers: matching slaves of binlog server %+v below %+v", binlogServer.Key, candidateSlave.Key)
// Right now sequentially.
// At this point just do what you can, don't return an error
MultiMatchSlaves(&binlogServer.Key, &candidateSlave.Key, "")
log.Debugf("RegroupSlavesIncludingSubSlavesOfBinlogServers: done matching slaves of binlog server %+v below %+v", binlogServer.Key, candidateSlave.Key)
}
log.Debugf("RegroupSlavesIncludingSubSlavesOfBinlogServers: done handling binlog regrouping for %+v; will proceed with normal RegroupSlaves", *masterKey)
AuditOperation("regroup-slaves-including-bls", masterKey, fmt.Sprintf("matched slaves of binlog server slaves of %+v under %+v", *masterKey, candidateSlave.Key))
return nil
}()
// Proceed to normal regroup:
return RegroupSlavesPseudoGTID(masterKey, returnSlaveEvenOnFailureToRegroup, onCandidateSlaveChosen, postponedFunctionsContainer)
}
// RegroupSlavesGTID will choose a candidate slave of a given instance, and enslave its siblings using GTID
func RegroupSlavesGTID(masterKey *InstanceKey, returnSlaveEvenOnFailureToRegroup bool, onCandidateSlaveChosen func(*Instance)) ([](*Instance), [](*Instance), *Instance, error) {
var emptySlaves [](*Instance)
candidateSlave, aheadSlaves, equalSlaves, laterSlaves, err := GetCandidateSlave(masterKey, true)
if err != nil {
if !returnSlaveEvenOnFailureToRegroup {
candidateSlave = nil
}
return emptySlaves, emptySlaves, candidateSlave, err
}
if onCandidateSlaveChosen != nil {
onCandidateSlaveChosen(candidateSlave)
}
slavesToMove := append(equalSlaves, laterSlaves...)
log.Debugf("RegroupSlavesGTID: working on %d slaves", len(slavesToMove))
movedSlaves, unmovedSlaves, err, _ := moveSlavesViaGTID(slavesToMove, candidateSlave)
if err != nil {
log.Errore(err)
}
unmovedSlaves = append(unmovedSlaves, aheadSlaves...)
StartSlave(&candidateSlave.Key)
log.Debugf("RegroupSlavesGTID: done")
AuditOperation("regroup-slaves-gtid", masterKey, fmt.Sprintf("regrouped slaves of %+v via GTID; promoted %+v", *masterKey, candidateSlave.Key))
return unmovedSlaves, movedSlaves, candidateSlave, err
}
// RegroupSlavesBinlogServers works on a binlog-servers topology. It picks the most up-to-date BLS and repoints all other
// BLS below it
func RegroupSlavesBinlogServers(masterKey *InstanceKey, returnSlaveEvenOnFailureToRegroup bool) (repointedBinlogServers [](*Instance), promotedBinlogServer *Instance, err error) {
var binlogServerSlaves [](*Instance)
promotedBinlogServer, binlogServerSlaves, err = getMostUpToDateActiveBinlogServer(masterKey)
resultOnError := func(err error) ([](*Instance), *Instance, error) {
if !returnSlaveEvenOnFailureToRegroup {
promotedBinlogServer = nil
}
return repointedBinlogServers, promotedBinlogServer, err
}
if err != nil {
return resultOnError(err)
}
repointedBinlogServers, err, _ = RepointTo(binlogServerSlaves, &promotedBinlogServer.Key)
if err != nil {
return resultOnError(err)
}
AuditOperation("regroup-slaves-bls", masterKey, fmt.Sprintf("regrouped binlog server slaves of %+v; promoted %+v", *masterKey, promotedBinlogServer.Key))
return repointedBinlogServers, promotedBinlogServer, nil
}
// RegroupSlaves is a "smart" method of promoting one slave over the others ("promoting" it on top of its siblings)
// This method decides which strategy to use: GTID, Pseudo-GTID, Binlog Servers.
func RegroupSlaves(masterKey *InstanceKey, returnSlaveEvenOnFailureToRegroup bool,
onCandidateSlaveChosen func(*Instance),
postponedFunctionsContainer *PostponedFunctionsContainer) (
aheadSlaves [](*Instance), equalSlaves [](*Instance), laterSlaves [](*Instance), instance *Instance, err error) {
//
var emptySlaves [](*Instance)
slaves, err := ReadSlaveInstances(masterKey)
if err != nil {
return emptySlaves, emptySlaves, emptySlaves, instance, err
}
if len(slaves) == 0 {
return emptySlaves, emptySlaves, emptySlaves, instance, err
}
if len(slaves) == 1 {
return emptySlaves, emptySlaves, emptySlaves, slaves[0], err
}
allGTID := true
allBinlogServers := true
allPseudoGTID := true
for _, slave := range slaves {
if !slave.UsingGTID() {
allGTID = false
}
if !slave.IsBinlogServer() {
allBinlogServers = false
}
if !slave.UsingPseudoGTID {
allPseudoGTID = false
}
}
if allGTID {
log.Debugf("RegroupSlaves: using GTID to regroup slaves of %+v", *masterKey)
unmovedSlaves, movedSlaves, candidateSlave, err := RegroupSlavesGTID(masterKey, returnSlaveEvenOnFailureToRegroup, onCandidateSlaveChosen)
return unmovedSlaves, emptySlaves, movedSlaves, candidateSlave, err
}
if allBinlogServers {
log.Debugf("RegroupSlaves: using binlog servers to regroup slaves of %+v", *masterKey)
movedSlaves, candidateSlave, err := RegroupSlavesBinlogServers(masterKey, returnSlaveEvenOnFailureToRegroup)
return emptySlaves, emptySlaves, movedSlaves, candidateSlave, err
}
if allPseudoGTID {
log.Debugf("RegroupSlaves: using Pseudo-GTID to regroup slaves of %+v", *masterKey)
return RegroupSlavesPseudoGTID(masterKey, returnSlaveEvenOnFailureToRegroup, onCandidateSlaveChosen, postponedFunctionsContainer)
}
// And, as a last resort, we do Pseudo-GTID & binlog servers
log.Warningf("RegroupSlaves: unsure what method to invoke for %+v; trying Pseudo-GTID+Binlog Servers", *masterKey)
return RegroupSlavesPseudoGTIDIncludingSubSlavesOfBinlogServers(masterKey, returnSlaveEvenOnFailureToRegroup, onCandidateSlaveChosen, postponedFunctionsContainer)
}
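// Illustrative usage sketch (not part of the original source; failedMasterKey is a hypothetical key):
// a recovery-style call that keeps the chosen slave even if regrouping partially fails, with no
// candidate-chosen callback and no postponement container.
//
//	lost, equal, moved, promoted, err := RegroupSlaves(&failedMasterKey, true, nil, nil)
//	if promoted != nil {
//		log.Infof("promoted %+v; lost: %d, equal: %d, moved: %d, err: %+v", promoted.Key, len(lost), len(equal), len(moved), err)
//	}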
// relocateBelowInternal is a potentially recursive function which chooses how to relocate an instance below another.
// It may choose to use Pseudo-GTID, or normal binlog positions, or take advantage of binlog servers,
// or it may combine any of the above in a multi-step operation.
func relocateBelowInternal(instance, other *Instance) (*Instance, error) {
if canReplicate, err := instance.CanReplicateFrom(other); !canReplicate {
return instance, log.Errorf("%+v cannot replicate from %+v. Reason: %+v", instance.Key, other.Key, err)
}
// simplest:
if InstanceIsMasterOf(other, instance) {
// already the desired setup.
return Repoint(&instance.Key, &other.Key, GTIDHintNeutral)
}
// Do we have record of equivalent coordinates?
if !instance.IsBinlogServer() {
if movedInstance, err := MoveEquivalent(&instance.Key, &other.Key); err == nil {
return movedInstance, nil
}
}
// Try and take advantage of binlog servers:
if InstancesAreSiblings(instance, other) && other.IsBinlogServer() {
return MoveBelow(&instance.Key, &other.Key)
}
instanceMaster, found, err := ReadInstance(&instance.MasterKey)
if err != nil || !found {
return instance, err
}
if instanceMaster.MasterKey.Equals(&other.Key) && instanceMaster.IsBinlogServer() {
// Moving to grandparent via binlog server
return Repoint(&instance.Key, &instanceMaster.MasterKey, GTIDHintDeny)
}
if other.IsBinlogServer() {
if instanceMaster.IsBinlogServer() && InstancesAreSiblings(instanceMaster, other) {
// Special case: this is a binlog server family; we move under the uncle, in one single step
return Repoint(&instance.Key, &other.Key, GTIDHintDeny)
}
// Relocate to its master, then repoint to the binlog server
otherMaster, found, err := ReadInstance(&other.MasterKey)
if err != nil || !found {
return instance, err
}
if !other.IsLastCheckValid {
return instance, log.Errorf("Binlog server %+v is not reachable. It would take two steps to relocate %+v below it, and I won't even do the first step.", other.Key, instance.Key)
}
log.Debugf("Relocating to a binlog server; will first attempt to relocate to the binlog server's master: %+v, and then repoint down", otherMaster.Key)
if _, err := relocateBelowInternal(instance, otherMaster); err != nil {
return instance, err
}
return Repoint(&instance.Key, &other.Key, GTIDHintDeny)
}
if instance.IsBinlogServer() {
// Can only move within the binlog-server family tree
// And these have been covered just now: move up from a master binlog server, move below a sibling binlog server.
// sure, the family can be more complex, but we keep these operations atomic
return nil, log.Errorf("Relocating binlog server %+v below %+v turns to be too complex; please do it manually", instance.Key, other.Key)
}
// Next, try GTID
if _, _, canMove := canMoveViaGTID(instance, other); canMove {
return moveInstanceBelowViaGTID(instance, other)
}
// Next, try Pseudo-GTID
if instance.UsingPseudoGTID && other.UsingPseudoGTID {
// We prefer PseudoGTID to anything else because, while it takes longer to run, it does not issue
// a STOP SLAVE on any server other than "instance" itself.
instance, _, err := MatchBelow(&instance.Key, &other.Key, true)
return instance, err
}
// No Pseudo-GTID; check simple binlog file/pos operations:
if InstancesAreSiblings(instance, other) {
return MoveBelow(&instance.Key, &other.Key)
}
// See if we need to MoveUp
if instanceMaster.MasterKey.Equals(&other.Key) {
// Moving to grandparent
return MoveUp(&instance.Key)
}
if instanceMaster.IsBinlogServer() {
// Break operation into two: move (repoint) up, then continue
if _, err := MoveUp(&instance.Key); err != nil {
return instance, err
}
return relocateBelowInternal(instance, other)
}
// Too complex
return nil, log.Errorf("Relocating %+v below %+v turns to be too complex; please do it manually", instance.Key, other.Key)
}
// RelocateBelow will attempt moving instance indicated by instanceKey below another instance.
// Orchestrator will try and figure out the best way to relocate the server. This could span normal
// binlog-position, pseudo-gtid, repointing, binlog servers...
func RelocateBelow(instanceKey, otherKey *InstanceKey) (*Instance, error) {
instance, found, err := ReadInstance(instanceKey)
if err != nil || !found {
return instance, log.Errorf("Error reading %+v", *instanceKey)
}
other, found, err := ReadInstance(otherKey)
if err != nil || !found {
return instance, log.Errorf("Error reading %+v", *otherKey)
}
instance, err = relocateBelowInternal(instance, other)
if err == nil {
AuditOperation("relocate-below", instanceKey, fmt.Sprintf("relocated %+v below %+v", *instanceKey, *otherKey))
}
return instance, err
}
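// Illustrative usage sketch (not part of the original source; instanceKey and newMasterKey are hypothetical):
// the generic entry point for moving one instance below another, letting orchestrator pick the method.
//
//	if _, err := RelocateBelow(&instanceKey, &newMasterKey); err != nil {
//		log.Errore(err)
//	}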
// relocateSlavesInternal is a potentially recursive function which chooses how to relocate
// slaves of an instance below another.
// It may choose to use Pseudo-GTID, or normal binlog positions, or take advantage of binlog servers,
// or it may combine any of the above in a multi-step operation.
func relocateSlavesInternal(slaves [](*Instance), instance, other *Instance) ([](*Instance), error, []error) {
errs := []error{}
var err error
// simplest:
if instance.Key.Equals(&other.Key) {
// already the desired setup.
return RepointTo(slaves, &other.Key)
}
// Try and take advantage of binlog servers:
if InstanceIsMasterOf(other, instance) && instance.IsBinlogServer() {
// Up from a binlog server
return RepointTo(slaves, &other.Key)
}
if InstanceIsMasterOf(instance, other) && other.IsBinlogServer() {
// Down under a binlog server
return RepointTo(slaves, &other.Key)
}
if InstancesAreSiblings(instance, other) && instance.IsBinlogServer() && other.IsBinlogServer() {
// Between siblings
return RepointTo(slaves, &other.Key)
}
if other.IsBinlogServer() {
// Relocate to binlog server's parent (recursive call), then repoint down
otherMaster, found, err := ReadInstance(&other.MasterKey)
if err != nil || !found {
return nil, err, errs
}
slaves, err, errs = relocateSlavesInternal(slaves, instance, otherMaster)
if err != nil {
return slaves, err, errs
}
return RepointTo(slaves, &other.Key)
}
// GTID
{
movedSlaves, unmovedSlaves, err, errs := moveSlavesViaGTID(slaves, other)
if len(movedSlaves) == len(slaves) {
// Moved (or tried moving) everything via GTID
return movedSlaves, err, errs
} else if len(movedSlaves) > 0 {
// something was moved via GTID; let's try further on
return relocateSlavesInternal(unmovedSlaves, instance, other)
}
// Otherwise nothing was moved via GTID. Maybe we don't have any GTIDs, we continue.
}
// Pseudo GTID
if other.UsingPseudoGTID {
// Which slaves are using Pseudo GTID?
var pseudoGTIDSlaves [](*Instance)
for _, slave := range slaves {
if slave.UsingPseudoGTID {
pseudoGTIDSlaves = append(pseudoGTIDSlaves, slave)
}
}
pseudoGTIDSlaves, _, err, errs = MultiMatchBelow(pseudoGTIDSlaves, &other.Key, false, nil)
return pseudoGTIDSlaves, err, errs
}
// Normal binlog file:pos
if InstanceIsMasterOf(other, instance) {
// moveUpSlaves -- but not supporting "slaves" argument at this time.
}
// Too complex
return nil, log.Errorf("Relocating %+v slaves of %+v below %+v turns to be too complex; please do it manually", len(slaves), instance.Key, other.Key), errs
}
// RelocateSlaves will attempt moving slaves of an instance indicated by instanceKey below another instance.
// Orchestrator will try and figure out the best way to relocate the servers. This could span normal
// binlog-position, pseudo-gtid, repointing, binlog servers...
func RelocateSlaves(instanceKey, otherKey *InstanceKey, pattern string) (slaves [](*Instance), other *Instance, err error, errs []error) {
instance, found, err := ReadInstance(instanceKey)
if err != nil || !found {
return slaves, other, log.Errorf("Error reading %+v", *instanceKey), errs
}
other, found, err = ReadInstance(otherKey)
if err != nil || !found {
return slaves, other, log.Errorf("Error reading %+v", *otherKey), errs
}
slaves, err = ReadSlaveInstances(instanceKey)
if err != nil {
return slaves, other, err, errs
}
slaves = RemoveInstance(slaves, otherKey)
slaves = filterInstancesByPattern(slaves, pattern)
if len(slaves) == 0 {
// Nothing to do
return slaves, other, nil, errs
}
slaves, err, errs = relocateSlavesInternal(slaves, instance, other)
if err == nil {
AuditOperation("relocate-slaves", instanceKey, fmt.Sprintf("relocated %+v slaves of %+v below %+v", len(slaves), *instanceKey, *otherKey))
}
return slaves, other, err, errs
}
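// Illustrative usage sketch (not part of the original source; oldMasterKey and newMasterKey are hypothetical):
// relocating all slaves of one instance below another, with no hostname filter pattern.
//
//	relocated, _, err, errs := RelocateSlaves(&oldMasterKey, &newMasterKey, "")
//	if err != nil {
//		log.Errorf("relocated %d slaves; %d errors; last error: %+v", len(relocated), len(errs), err)
//	}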