Fix dr auto sync #21

Closed
wants to merge 2 commits into from
72 changes: 46 additions & 26 deletions pkg/replication/replication_mode.go
@@ -423,8 +423,8 @@ func (m *ModeManager) tickUpdateState() {
if r.Role != placement.Learner {
totalVoter += r.Count
}
minimalUpPrimary := minimalUpVoters(r, stores[primaryUp], stores[primaryDown])
minimalUpDr := minimalUpVoters(r, stores[drUp], stores[drDown])
minimalUpPrimary := minimalUpVoters(r, stores[primaryUpVoter], stores[primaryDownVoter])
minimalUpDr := minimalUpVoters(r, stores[drUpVoter], stores[drDownVoter])
primaryHasVoter = primaryHasVoter || minimalUpPrimary > 0
drHasVoter = drHasVoter || minimalUpDr > 0
upVoters := minimalUpPrimary + minimalUpDr
@@ -440,10 +440,11 @@ func (m *ModeManager) tickUpdateState() {
hasMajority := totalUpVoter*2 > totalVoter

log.Debug("replication store status",
zap.Uint64s("up-primary", storeIDs(stores[primaryUp])),
zap.Uint64s("up-dr", storeIDs(stores[drUp])),
zap.Uint64s("down-primary", storeIDs(stores[primaryDown])),
zap.Uint64s("down-dr", storeIDs(stores[drDown])),
zap.Uint64s("up-primary-voter", storeIDs(stores[primaryUpVoter])),
zap.Uint64s("up-dr-voter", storeIDs(stores[drUpVoter])),
zap.Uint64s("down-primary-voter", storeIDs(stores[primaryDownVoter])),
zap.Uint64s("down-dr-voter", storeIDs(stores[drDownVoter])),
zap.Uint64s("up-primary-learner", storeIDs(stores[primaryUpLearner])),
zap.Bool("can-sync", canSync),
zap.Bool("has-majority", hasMajority),
)
@@ -465,36 +466,36 @@ func (m *ModeManager) tickUpdateState() {
+------------+ +------------+

*/

primaryUpStores := append(stores[primaryUpVoter], stores[primaryUpLearner]...)
switch m.drGetState() {
case drStateSync:
// If hasMajority is false, the cluster is always unavailable, so switching to async won't help.
if !canSync && hasMajority {
m.drSwitchToAsyncWait(storeIDs(stores[primaryUp]))
m.drSwitchToAsyncWait(storeIDs(primaryUpStores))
}
case drStateAsyncWait:
if canSync {
m.drSwitchToSync()
break
}
if oldAvailableStores := m.drGetAvailableStores(); !reflect.DeepEqual(oldAvailableStores, storeIDs(stores[primaryUp])) {
m.drSwitchToAsyncWait(storeIDs(stores[primaryUp]))
if oldAvailableStores := m.drGetAvailableStores(); !reflect.DeepEqual(oldAvailableStores, storeIDs(primaryUpStores)) {
m.drSwitchToAsyncWait(storeIDs(primaryUpStores))
break
}
if m.drCheckStoreStateUpdated(storeIDs(stores[primaryUp])) {
m.drSwitchToAsync(storeIDs(stores[primaryUp]))
if m.drCheckStoreStateUpdated(storeIDs(primaryUpStores)) {
m.drSwitchToAsync(storeIDs(primaryUpStores))
}
case drStateAsync:
if canSync {
m.drSwitchToSyncRecover()
break
}
if !reflect.DeepEqual(m.drGetAvailableStores(), stores[primaryUp]) && m.drCheckStoreStateUpdated(storeIDs(stores[primaryUp])) {
m.drSwitchToAsync(storeIDs(stores[primaryUp]))
if !reflect.DeepEqual(m.drGetAvailableStores(), primaryUpStores) && m.drCheckStoreStateUpdated(storeIDs(primaryUpStores)) {
m.drSwitchToAsync(storeIDs(primaryUpStores))
}
case drStateSyncRecover:
if !canSync && hasMajority {
m.drSwitchToAsync(storeIDs(stores[primaryUp]))
m.drSwitchToAsync(storeIDs(primaryUpStores))
} else {
m.updateProgress()
progress := m.estimateProgress()
@@ -562,10 +563,11 @@ func (m *ModeManager) tickReplicateStatus() {
}

const (
primaryUp = iota
primaryDown
drUp
drDown
primaryUpVoter = iota
primaryDownVoter
drUpVoter
drDownVoter
primaryUpLearner
storeStatusTypeCount
)
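
For orientation, these constants index the per-status buckets that checkStoreStatus builds. The sketch below is illustrative only and not part of the diff; it assumes the bucket slice is sized with storeStatusTypeCount (inferred from how the buckets are appended to in this change) and simplifies the voter accounting, which in the real code goes through minimalUpVoters and the placement rules.

// Illustrative sketch, not part of the diff.
func exampleStatusBuckets() {
	// One slice per status constant; storeStatusTypeCount keeps them in sync.
	stores := make([][]*core.StoreInfo, storeStatusTypeCount)
	// ... checkStoreStatus fills the buckets ...
	// Majority/voter accounting looks only at the voter buckets.
	upVoters := len(stores[primaryUpVoter]) + len(stores[drUpVoter])
	// The availability list used for async/async_wait also folds in the up
	// learner stores in the primary DC.
	primaryUpStores := append(stores[primaryUpVoter], stores[primaryUpLearner]...)
	_, _ = upVoters, primaryUpStores
}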

@@ -577,24 +579,28 @@ func (m *ModeManager) checkStoreStatus() [][]*core.StoreInfo {
if s.IsRemoved() {
continue
}
down := s.DownTime() >= m.config.DRAutoSync.WaitStoreTimeout.Duration
labelValue := s.GetLabelValue(m.config.DRAutoSync.LabelKey)
// Learner peers do not participate in the majority commit or voting, so such a store should not be counted in primary/dr as a normal store.
if s.GetRegionCount() == s.GetLearnerCount() {
if s.GetRegionCount() == s.GetLearnerCount() && m.checkLearnerStore(s) {
if labelValue == m.config.DRAutoSync.Primary && !down {
stores[primaryUpLearner] = append(stores[primaryUpLearner], s)
}
continue
}
down := s.DownTime() >= m.config.DRAutoSync.WaitStoreTimeout.Duration
labelValue := s.GetLabelValue(m.config.DRAutoSync.LabelKey)

if labelValue == m.config.DRAutoSync.Primary {
if down {
stores[primaryDown] = append(stores[primaryDown], s)
stores[primaryDownVoter] = append(stores[primaryDownVoter], s)
} else {
stores[primaryUp] = append(stores[primaryUp], s)
stores[primaryUpVoter] = append(stores[primaryUpVoter], s)
}
}
if labelValue == m.config.DRAutoSync.DR {
if down {
stores[drDown] = append(stores[drDown], s)
stores[drDownVoter] = append(stores[drDownVoter], s)
} else {
stores[drUp] = append(stores[drUp], s)
stores[drUpVoter] = append(stores[drUpVoter], s)
}
}
}
@@ -604,6 +610,20 @@ func (m *ModeManager) checkStoreStatus() [][]*core.StoreInfo {
return stores
}

// checkLearnerStore checks whether all peers on the store should be learners according to the placement rules.
func (m *ModeManager) checkLearnerStore(store *core.StoreInfo) bool {
for _, r := range m.cluster.GetRuleManager().GetAllRules() {
if len(r.StartKey) > 0 || len(r.EndKey) > 0 {
// Only global rules (covering the whole key range) are considered; skip the others.
continue
}
if r.Role == placement.Learner && placement.MatchLabelConstraints(store, r.LabelConstraints) {
return true
}
}
return false
}
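
For reference, the kind of rule checkLearnerStore looks for is a global learner rule whose label constraints match the store. The snippet below is an illustrative sketch, not part of the diff; it assumes PD's placement.Rule and placement.LabelConstraint field names and the placement.In operator, and the group/rule IDs are made up.

// Illustrative only: a global learner rule that checkLearnerStore would match
// for stores labeled zone=zone2.
var drLearnerRule = &placement.Rule{
	GroupID: "pd",
	ID:      "dr-zone2-learner",
	Role:    placement.Learner,
	Count:   1,
	// Empty StartKey/EndKey means the rule covers the whole key range,
	// so it is treated as a global rule by the check above.
	LabelConstraints: []placement.LabelConstraint{
		{Key: "zone", Op: placement.In, Values: []string{"zone2"}},
	},
}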

// UpdateStoreDRStatus saves the dr-autosync status of a store.
func (m *ModeManager) UpdateStoreDRStatus(id uint64, status *pb.StoreDRAutoSyncStatus) {
m.drStoreStatus.Store(id, status)
56 changes: 55 additions & 1 deletion pkg/replication/replication_mode_test.go
@@ -177,7 +177,8 @@ func TestStateSwitch(t *testing.T) {
cluster.GetRuleManager().SetAllGroupBundles(
genPlacementRuleConfig([]ruleConfig{
{key: "zone", value: "zone1", role: placement.Voter, count: 4},
{key: "zone", value: "zone2", role: placement.Voter, count: 2},
{key: "zone", value: "zone2", role: placement.Voter, count: 1},
{key: "zone", value: "zone2", role: placement.Learner, count: 1},
}), true)

cluster.AddLabelsStore(1, 1, map[string]string{"zone": "zone1"})
@@ -697,6 +698,59 @@ func TestComplexPlacementRules2(t *testing.T) {
re.Equal(drStateAsyncWait, rep.drGetState())
}

func TestLearnerCheck(t *testing.T) {
re := require.New(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
store := storage.NewStorageWithMemoryBackend()
conf := config.ReplicationModeConfig{ReplicationMode: modeDRAutoSync, DRAutoSync: config.DRAutoSyncReplicationConfig{
LabelKey: "zone",
Primary: "zone1",
DR: "zone2",
WaitStoreTimeout: typeutil.Duration{Duration: time.Minute},
}}
cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions())
replicator := newMockReplicator([]uint64{1})
rep, err := NewReplicationModeManager(conf, store, cluster, replicator)
re.NoError(err)
cluster.GetRuleManager().SetAllGroupBundles(
genPlacementRuleConfig([]ruleConfig{
{key: "zone", value: "zone1", role: placement.Voter, count: 3},
{key: "zone", value: "zone2", role: placement.Voter, count: 2},
{key: "zone", value: "zone2", role: placement.Learner, count: 1},
}), true)

cluster.AddLabelsStore(1, 1, map[string]string{"zone": "zone1"})
cluster.AddLabelsStore(2, 1, map[string]string{"zone": "zone1"})
cluster.AddLabelsStore(3, 1, map[string]string{"zone": "zone1"})
cluster.AddLabelsStore(4, 1, map[string]string{"zone": "zone1"})
cluster.AddLabersStoreWithLearnerCount(4, 1, 1, map[string]string{"zone": "zone1"})

// initial state is sync
re.Equal(drStateSync, rep.drGetState())
stateID := rep.drAutoSync.StateID
re.NotEqual(uint64(0), stateID)
rep.tickReplicateStatus()
re.Equal(fmt.Sprintf(`{"state":"sync","state_id":%d}`, stateID), replicator.lastData[1])
assertStateIDUpdate := func() {
re.NotEqual(stateID, rep.drAutoSync.StateID)
stateID = rep.drAutoSync.StateID
}
syncStoreStatus := func(storeIDs ...uint64) {
state := rep.GetReplicationStatus()
for _, s := range storeIDs {
rep.UpdateStoreDRStatus(s, &pb.StoreDRAutoSyncStatus{State: state.GetDrAutoSync().State, StateId: state.GetDrAutoSync().GetStateId()})
}
}

// Although all peers on store 4 are learners, the store is still counted as available.
syncStoreStatus(1, 2, 3, 4)
rep.tickUpdateState()
assertStateIDUpdate()
rep.tickReplicateStatus()
re.Equal(fmt.Sprintf(`{"state":"async_wait","state_id":%d,"available_stores":[1,2,3,4]}`, stateID), replicator.lastData[1])
}

func genRegions(cluster *mockcluster.Cluster, stateID uint64, n int) []*core.RegionInfo {
var regions []*core.RegionInfo
for i := 1; i <= n; i++ {