diff --git a/redis/redis-checker/src/main/java/com/ctrip/xpipe/redis/checker/model/DcClusterShardKeeper.java b/redis/redis-checker/src/main/java/com/ctrip/xpipe/redis/checker/model/DcClusterShardKeeper.java index d4804bebc..2e246e484 100644 --- a/redis/redis-checker/src/main/java/com/ctrip/xpipe/redis/checker/model/DcClusterShardKeeper.java +++ b/redis/redis-checker/src/main/java/com/ctrip/xpipe/redis/checker/model/DcClusterShardKeeper.java @@ -67,7 +67,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(super.hashCode(), port); + return Objects.hash(super.hashCode(), active); } @Override diff --git a/redis/redis-checker/src/main/java/com/ctrip/xpipe/redis/checker/model/KeeperContainerUsedInfoModel.java b/redis/redis-checker/src/main/java/com/ctrip/xpipe/redis/checker/model/KeeperContainerUsedInfoModel.java index efffe46e0..b2fb6cef6 100644 --- a/redis/redis-checker/src/main/java/com/ctrip/xpipe/redis/checker/model/KeeperContainerUsedInfoModel.java +++ b/redis/redis-checker/src/main/java/com/ctrip/xpipe/redis/checker/model/KeeperContainerUsedInfoModel.java @@ -12,6 +12,10 @@ public class KeeperContainerUsedInfoModel { private String org; + private String az; + + private Date updateTime; + private long activeInputFlow; private long totalInputFlow; @@ -54,6 +58,7 @@ public KeeperContainerUsedInfoModel(KeeperContainerUsedInfoModel model, Map.Entr this.keeperIp = model.getKeeperIp(); this.dcName = model.getDcName(); this.org = model.getOrg(); + this.az = model.getAz(); this.activeInputFlow = model.getActiveInputFlow() + dcClusterShard.getValue().getInputFlow(); this.totalInputFlow = model.getTotalInputFlow() + dcClusterShard.getValue().getInputFlow(); this.inputFlowStandard = model.getInputFlowStandard(); @@ -76,6 +81,8 @@ public static KeeperContainerUsedInfoModel cloneKeeperContainerUsedInfoModel(Kee newModel.setKeeperIp(model.getKeeperIp()); newModel.setDcName(model.getDcName()); newModel.setOrg(model.getOrg()); + newModel.setAz(model.getAz()); + newModel.setUpdateTime(model.getUpdateTime()); newModel.setActiveInputFlow(model.getActiveInputFlow()); newModel.setTotalInputFlow(model.getTotalInputFlow()); newModel.setInputFlowStandard(model.getInputFlowStandard()); @@ -128,8 +135,27 @@ public String getOrg() { return org; } - public void setOrg(String org) { + public KeeperContainerUsedInfoModel setOrg(String org) { this.org = org; + return this; + } + + public String getAz() { + return az; + } + + public KeeperContainerUsedInfoModel setAz(String az) { + this.az = az; + return this; + } + + public Date getUpdateTime() { + return updateTime; + } + + public KeeperContainerUsedInfoModel setUpdateTime(Date updateTime) { + this.updateTime = updateTime; + return this; } public long getActiveInputFlow() { @@ -262,6 +288,8 @@ public String toString() { "keeperIp='" + keeperIp + '\'' + ", dcName='" + dcName + '\'' + ", org='" + org + '\'' + + ", az='" + az + '\'' + + ", updateTime=" + updateTime + ", activeInputFlow=" + activeInputFlow + ", totalInputFlow=" + totalInputFlow + ", inputFlowStandard=" + inputFlowStandard + diff --git a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/controller/api/KeeperContainerController.java b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/controller/api/KeeperContainerController.java index 88c4c6084..75253c9d6 100644 --- a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/controller/api/KeeperContainerController.java +++ 
b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/controller/api/KeeperContainerController.java
@@ -14,6 +14,7 @@
 import org.springframework.web.bind.annotation.RequestMethod;
 import org.springframework.web.bind.annotation.RestController;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
diff --git a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/controller/consoleportal/KeeperContainerInfoController.java b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/controller/consoleportal/KeeperContainerInfoController.java
index 29c0aa910..7c4d8e2ed 100644
--- a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/controller/consoleportal/KeeperContainerInfoController.java
+++ b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/controller/consoleportal/KeeperContainerInfoController.java
@@ -11,6 +11,8 @@
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.web.bind.annotation.*;
+import java.util.ArrayList;
+import java.util.Date;
 import java.util.List;
 @RestController
@@ -86,7 +88,7 @@ public List getOverloadKeeperContainerMigra
     public RetMessage beginToMigrateOverloadKeeperContainers(@RequestBody List keeperContainerDetailModels) {
         logger.info("begin to migrate over load keeper containers {}", keeperContainerDetailModels);
         try {
-            keeperContainerMigrationService.beginMigrateKeeperContainers(keeperContainerDetailModels);
+//            keeperContainerMigrationService.beginMigrateKeeperContainers(keeperContainerDetailModels);
         } catch (Throwable th) {
             logger.warn("migrate over load keeper containers {} fail by {}", keeperContainerDetailModels, th.getMessage());
             return RetMessage.createFailMessage(th.getMessage());
diff --git a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/KeeperContainerOverloadCause.java b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/KeeperContainerOverloadCause.java
index cadc3e120..2b4c496e1 100644
--- a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/KeeperContainerOverloadCause.java
+++ b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/KeeperContainerOverloadCause.java
@@ -8,7 +8,9 @@ public enum KeeperContainerOverloadCause {
     BOTH,
     KEEPER_PAIR_PEER_DATA_OVERLOAD,
     KEEPER_PAIR_INPUT_FLOW_OVERLOAD,
-    KEEPER_PAIR_BOTH;
+    KEEPER_PAIR_BOTH,
+    RESOURCE_LACK,
+    PAIR_RESOURCE_LACK;
     public static KeeperContainerOverloadCause findByValue(String value) {
         if(StringUtil.isEmpty(value)) return null;
diff --git a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/entity/IPPairData.java b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/entity/IPPairData.java
index 7bbdee4f3..7884d4226 100644
--- a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/entity/IPPairData.java
+++ b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/entity/IPPairData.java
@@ -9,7 +9,6 @@ public class IPPairData {
     private long inputFlow;
     private long peerData;
-    private final Map keeperUsedInfoMap = new HashMap<>();
     public IPPairData() {
     }
@@ -17,13 +16,11 @@ public IPPairData() {
     public void removeDcClusterShard(Map.Entry migrateDcClusterShard) {
         this.inputFlow -= migrateDcClusterShard.getValue().getInputFlow();
         this.peerData -= migrateDcClusterShard.getValue().getPeerData();
-        keeperUsedInfoMap.remove(migrateDcClusterShard.getKey());
     }
     public void addDcClusterShard(Map.Entry
migrateDcClusterShard) { this.inputFlow += migrateDcClusterShard.getValue().getInputFlow(); this.peerData += migrateDcClusterShard.getValue().getPeerData(); - keeperUsedInfoMap.put(migrateDcClusterShard.getKey(), migrateDcClusterShard.getValue()); } public long getInputFlow() { @@ -34,8 +31,4 @@ public long getPeerData() { return peerData; } - public Map getKeeperUsedInfoMap() { - return keeperUsedInfoMap; - } - } diff --git a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/handler/KeeperContainerFilterChain.java b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/handler/KeeperContainerFilterChain.java index 9d8fb003f..2229b50e8 100644 --- a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/handler/KeeperContainerFilterChain.java +++ b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/handler/KeeperContainerFilterChain.java @@ -3,6 +3,7 @@ import com.ctrip.xpipe.redis.checker.model.DcClusterShardKeeper; import com.ctrip.xpipe.redis.checker.model.KeeperContainerUsedInfoModel; import com.ctrip.xpipe.redis.console.config.ConsoleConfig; +import com.ctrip.xpipe.redis.console.keeper.entity.IPPairData; import com.ctrip.xpipe.redis.console.keeper.util.KeeperContainerUsedInfoAnalyzerContext; import com.ctrip.xpipe.utils.VisibleForTesting; import org.springframework.beans.factory.annotation.Autowired; @@ -16,30 +17,46 @@ public class KeeperContainerFilterChain { @Autowired private ConsoleConfig config; - public boolean doKeeperContainerFilter(KeeperContainerUsedInfoModel targetContainer){ + public boolean isKeeperContainerUseful(KeeperContainerUsedInfoModel targetContainer){ Handler handler = new HostActiveHandler(); handler.setNextHandler(new HostDiskOverloadHandler(config)) .setNextHandler(new KeeperContainerOverloadHandler()); return handler.handle(targetContainer); } - public boolean doKeeperFilter(Map.Entry keeperUsedInfoEntry, - KeeperContainerUsedInfoModel srcKeeperContainer, - KeeperContainerUsedInfoModel targetKeeperContainer, - KeeperContainerUsedInfoAnalyzerContext analyzerUtil){ + public boolean canMigrate(Map.Entry keeperUsedInfoEntry, + KeeperContainerUsedInfoModel srcKeeperContainerPair, + KeeperContainerUsedInfoModel targetKeeperContainer, + KeeperContainerUsedInfoAnalyzerContext analyzerUtil){ Handler> handler = new KeeperDataOverloadHandler(targetKeeperContainer); - handler.setNextHandler(new KeeperPairOverloadHandler(analyzerUtil, srcKeeperContainer, targetKeeperContainer, config)); + handler.setNextHandler(new KeeperPairOverloadHandler(analyzerUtil, srcKeeperContainerPair, targetKeeperContainer, config)); return handler.handle(keeperUsedInfoEntry); } - public boolean doKeeperPairFilter(Map.Entry keeperUsedInfoEntry, - KeeperContainerUsedInfoModel keeperContainer1, - KeeperContainerUsedInfoModel keeperContainer2, - KeeperContainerUsedInfoAnalyzerContext analyzerUtil) { - return new KeeperPairOverloadHandler(analyzerUtil, keeperContainer1, keeperContainer2, config) + public boolean isKeeperPairOverload(Map.Entry keeperUsedInfoEntry, + KeeperContainerUsedInfoModel keeperContainer1, + KeeperContainerUsedInfoModel keeperContainer2, + KeeperContainerUsedInfoAnalyzerContext analyzerUtil) { + if (keeperContainer1.getKeeperIp().equals(keeperContainer2.getKeeperIp())) { + return true; + } + return !new KeeperPairOverloadHandler(analyzerUtil, keeperContainer1, keeperContainer2, config) .handle(keeperUsedInfoEntry); } + public boolean isKeeperContainerPairOverload(KeeperContainerUsedInfoModel 
keeperContainer1, + KeeperContainerUsedInfoModel keeperContainer2, + IPPairData ipPairData) { + if (keeperContainer1.getKeeperIp().equals(keeperContainer2.getKeeperIp())) { + return true; + } + return new KeeperContainerPairOverloadHandler(keeperContainer1, keeperContainer2, config, ipPairData).handle(null); + } + + public boolean isDataOverLoad(KeeperContainerUsedInfoModel model) { + return !new KeeperContainerOverloadHandler().handle(model); + } + @VisibleForTesting public void setConfig(ConsoleConfig config){ this.config = config; diff --git a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/handler/KeeperContainerPairOverloadHandler.java b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/handler/KeeperContainerPairOverloadHandler.java new file mode 100644 index 000000000..fcfefd3be --- /dev/null +++ b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/handler/KeeperContainerPairOverloadHandler.java @@ -0,0 +1,35 @@ +package com.ctrip.xpipe.redis.console.keeper.handler; + +import com.ctrip.xpipe.redis.checker.model.KeeperContainerUsedInfoModel; +import com.ctrip.xpipe.redis.console.config.ConsoleConfig; +import com.ctrip.xpipe.redis.console.keeper.entity.IPPairData; +import com.ctrip.xpipe.redis.console.model.KeeperContainerOverloadStandardModel; + +public class KeeperContainerPairOverloadHandler extends AbstractHandler{ + + private KeeperContainerUsedInfoModel pairA; + + private KeeperContainerUsedInfoModel pairB; + + private ConsoleConfig config; + + private IPPairData ipPairData; + + public KeeperContainerPairOverloadHandler(KeeperContainerUsedInfoModel pairA, KeeperContainerUsedInfoModel pairB, ConsoleConfig config, IPPairData ipPairData) { + this.pairA = pairA; + this.pairB = pairB; + this.config = config; + this.ipPairData = ipPairData; + } + + @Override + protected boolean doNextHandler(Object o) { + double keeperPairOverLoadFactor = config.getKeeperPairOverLoadFactor(); + KeeperContainerOverloadStandardModel minStandardModel = new KeeperContainerOverloadStandardModel() + .setFlowOverload((long) (Math.min(pairB.getInputFlowStandard(), pairA.getInputFlowStandard()) * keeperPairOverLoadFactor)) + .setPeerDataOverload((long) (Math.min(pairB.getRedisUsedMemoryStandard(), pairA.getRedisUsedMemoryStandard()) * keeperPairOverLoadFactor)); + long overloadInputFlow = ipPairData.getInputFlow() - minStandardModel.getFlowOverload(); + long overloadPeerData = ipPairData.getPeerData() - minStandardModel.getPeerDataOverload(); + return overloadInputFlow > 0 || overloadPeerData > 0; + } +} diff --git a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/handler/KeeperPairOverloadHandler.java b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/handler/KeeperPairOverloadHandler.java index 0c5004f5f..a0bbb39b4 100644 --- a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/handler/KeeperPairOverloadHandler.java +++ b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/handler/KeeperPairOverloadHandler.java @@ -6,12 +6,13 @@ import com.ctrip.xpipe.redis.console.config.ConsoleConfig; import com.ctrip.xpipe.redis.console.keeper.entity.IPPairData; import com.ctrip.xpipe.redis.console.keeper.util.KeeperContainerUsedInfoAnalyzerContext; +import com.ctrip.xpipe.redis.console.model.KeeperContainerOverloadStandardModel; import java.util.Map; public class KeeperPairOverloadHandler extends AbstractHandler>{ - private KeeperContainerUsedInfoAnalyzerContext analyzerUtil; 
+ private KeeperContainerUsedInfoAnalyzerContext analyzerContext; private KeeperContainerUsedInfoModel keeperContainer1; @@ -20,7 +21,7 @@ public class KeeperPairOverloadHandler extends AbstractHandler keeperUsedInfoEntry) { - IPPairData longLongPair = analyzerUtil.getIPPairData(keeperContainer1.getKeeperIp(), keeperContainer2.getKeeperIp()); - if (longLongPair == null) return true; double keeperPairOverLoadFactor = config.getKeeperPairOverLoadFactor(); - double flowStandard = Math.min(keeperContainer1.getInputFlowStandard(), keeperContainer2.getInputFlowStandard()) * keeperPairOverLoadFactor; - double peerDataStandard = Math.min(keeperContainer1.getRedisUsedMemoryStandard(), keeperContainer2.getRedisUsedMemoryStandard()) * keeperPairOverLoadFactor; - return longLongPair.getInputFlow() + keeperUsedInfoEntry.getValue().getInputFlow() < flowStandard && - longLongPair.getPeerData() + keeperUsedInfoEntry.getValue().getPeerData() < peerDataStandard; + IPPairData longLongPair = analyzerContext.getIPPairData(keeperContainer1.getKeeperIp(), keeperContainer2.getKeeperIp()); + if (longLongPair == null) return true; + KeeperContainerOverloadStandardModel minStandardModel = new KeeperContainerOverloadStandardModel() + .setFlowOverload((long) (Math.min(keeperContainer1.getInputFlowStandard(), keeperContainer2.getInputFlowStandard()) * keeperPairOverLoadFactor)) + .setPeerDataOverload((long) (Math.min(keeperContainer1.getRedisUsedMemoryStandard(), keeperContainer2.getRedisUsedMemoryStandard()) * keeperPairOverLoadFactor)); + + long overloadInputFlow = longLongPair.getInputFlow() + keeperUsedInfoEntry.getValue().getInputFlow() - minStandardModel.getFlowOverload(); + long overloadPeerData = longLongPair.getPeerData() + keeperUsedInfoEntry.getValue().getPeerData() - minStandardModel.getPeerDataOverload(); + return overloadPeerData < 0 && overloadInputFlow < 0; } } diff --git a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/impl/DefaultKeeperContainerAvailablePool.java b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/impl/DefaultKeeperContainerAvailablePool.java new file mode 100644 index 000000000..212b2a839 --- /dev/null +++ b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/impl/DefaultKeeperContainerAvailablePool.java @@ -0,0 +1,4 @@ +package com.ctrip.xpipe.redis.console.keeper.impl; + +public class DefaultKeeperContainerAvailablePool { +} diff --git a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/impl/DefaultKeeperContainerMigrationAnalyzer.java b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/impl/DefaultKeeperContainerMigrationAnalyzer.java new file mode 100644 index 000000000..42c25c6b9 --- /dev/null +++ b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/impl/DefaultKeeperContainerMigrationAnalyzer.java @@ -0,0 +1,207 @@ +package com.ctrip.xpipe.redis.console.keeper.impl; + +import com.ctrip.xpipe.api.foundation.FoundationService; +import com.ctrip.xpipe.monitor.CatEventMonitor; +import com.ctrip.xpipe.redis.checker.model.DcClusterShardKeeper; +import com.ctrip.xpipe.redis.checker.model.KeeperContainerUsedInfoModel; +import com.ctrip.xpipe.redis.checker.model.KeeperContainerUsedInfoModel.*; +import com.ctrip.xpipe.redis.console.config.ConsoleConfig; +import com.ctrip.xpipe.redis.console.keeper.KeeperContainerOverloadCause; +import com.ctrip.xpipe.redis.console.keeper.entity.IPPairData; +import 
com.ctrip.xpipe.redis.console.keeper.handler.KeeperContainerFilterChain; +import com.ctrip.xpipe.redis.console.keeper.util.DefaultKeeperContainerUsedInfoAnalyzerContext; +import com.ctrip.xpipe.redis.console.keeper.util.KeeperContainerUsedInfoAnalyzerContext; +import com.ctrip.xpipe.redis.console.model.KeeperContainerOverloadStandardModel; +import com.ctrip.xpipe.redis.console.model.MigrationKeeperContainerDetailModel; +import com.ctrip.xpipe.redis.console.service.KeeperContainerAnalyzerService; +import com.ctrip.xpipe.redis.console.service.MetaserverService; +import com.ctrip.xpipe.redis.core.entity.DcMeta; +import com.ctrip.xpipe.redis.core.meta.MetaCache; +import com.ctrip.xpipe.utils.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +import java.util.*; +import java.util.stream.Collectors; + +@Component +public class DefaultKeeperContainerMigrationAnalyzer implements KeeperContainerMigrationAnalyzer{ + + private static final Logger logger = LoggerFactory.getLogger(DefaultKeeperContainerMigrationAnalyzer.class); + + @Autowired + private KeeperContainerAnalyzerService keeperContainerAnalyzerService; + + @Autowired + private KeeperContainerFilterChain filterChain; + + @Autowired + private ConsoleConfig config; + + @Autowired + private MetaCache metaCache; + + private KeeperContainerUsedInfoAnalyzerContext analyzerContext; + + private static final String currentDc = FoundationService.DEFAULT.getDataCenter().toUpperCase(); + + private static final String KEEPER_RESOURCE_LACK = "keeper_resource_lack"; + + private static final String KEEPER_PAIR_RESOURCE_LACK = "keeper_pair_resource_lack"; + + @Override + public List getMigrationPlans(Map modelsMap) { + List models = new ArrayList<>(modelsMap.values()); + keeperContainerAnalyzerService.initStandard(models); + List modelsWithoutResource = new ArrayList<>(); + models.forEach(a -> modelsWithoutResource.add(KeeperContainerUsedInfoModel.cloneKeeperContainerUsedInfoModel(a))); + analyzerContext = new DefaultKeeperContainerUsedInfoAnalyzerContext(filterChain, metaCache.getXpipeMeta().getDcs().get(currentDc)); + analyzerContext.initKeeperPairData(modelsWithoutResource, modelsMap); + analyzerContext.initAvailablePool(modelsWithoutResource); + for (KeeperContainerUsedInfoModel model : modelsWithoutResource) { + generateDataOverLoadMigrationPlans(model, modelsMap); + for (String ip : analyzerContext.getAllPairsIP(model.getKeeperIp())) { + generatePairOverLoadMigrationPlans(model, modelsMap.get(ip)); + } + } + return analyzerContext.getAllMigrationPlans(); + } + + private void generateDataOverLoadMigrationPlans(KeeperContainerUsedInfoModel model, Map modelsMap) { + Object[] cause = getDataOverloadCause(model); + if (cause == null) return; + List> descShards = getDescShards(model.getDetailInfo(), (Boolean) cause[1]); + Iterator> iterator = descShards.iterator(); + while (filterChain.isDataOverLoad(model) && iterator.hasNext()) { + Map.Entry dcClusterShard = iterator.next(); + if (!dcClusterShard.getKey().isActive()) continue; + KeeperContainerUsedInfoModel backUpKeeper = modelsMap.get(analyzerContext.getBackUpKeeperIp(dcClusterShard.getKey())); + if (canSwitchMaster(model, backUpKeeper, dcClusterShard)) { + analyzerContext.addMigrationPlan(model, backUpKeeper, true, false, (String) cause[0], dcClusterShard, null); + continue; + } + KeeperContainerUsedInfoModel bestKeeperContainer = 
analyzerContext.getBestKeeperContainer(model, dcClusterShard, backUpKeeper, (Boolean) cause[1]);
+            if (bestKeeperContainer == null) {
+                break;
+            }
+            analyzerContext.addMigrationPlan(model, bestKeeperContainer, false, false, (String) cause[0], dcClusterShard, backUpKeeper);
+            analyzerContext.recycleKeeperContainer(bestKeeperContainer, (Boolean) cause[1]);
+        }
+        if (filterChain.isDataOverLoad(model)) {
+            logger.warn("[analyzeKeeperContainerUsedInfo] no available space for overload keeperContainer to migrate {}", model);
+            CatEventMonitor.DEFAULT.logEvent(KEEPER_RESOURCE_LACK, "Dc:" + currentDc + " Org:" + model.getOrg() + " Az:" + model.getAz());
+            analyzerContext.addResourceLackPlan(model, null, KeeperContainerOverloadCause.RESOURCE_LACK.name());
+        }
+    }
+
+    private void generatePairOverLoadMigrationPlans(KeeperContainerUsedInfoModel modelA, KeeperContainerUsedInfoModel modelB) {
+        Object[] cause = getPairOverloadCause(modelA, modelB);
+        if (cause == null) return;
+        List<Map.Entry<DcClusterShardKeeper, KeeperUsedInfo>> descShards = getDescShards(getAllDetailInfo(modelA, modelB), (Boolean) cause[1]);
+        Iterator<Map.Entry<DcClusterShardKeeper, KeeperUsedInfo>> iterator = descShards.iterator();
+        while (filterChain.isKeeperContainerPairOverload(modelA, modelB, analyzerContext.getIPPairData(modelA.getKeeperIp(), modelB.getKeeperIp())) && iterator.hasNext()) {
+            Map.Entry<DcClusterShardKeeper, KeeperUsedInfo> dcClusterShard = iterator.next();
+            if (dcClusterShard.getKey().isActive()) continue;
+            KeeperContainerUsedInfoModel activeKeeperContainer = dcClusterShard.getValue().getKeeperIP().equals(modelA.getKeeperIp()) ? modelB : modelA;
+            KeeperContainerUsedInfoModel backUpKeeperContainer = dcClusterShard.getValue().getKeeperIP().equals(modelA.getKeeperIp()) ? modelA : modelB;
+            KeeperContainerUsedInfoModel bestKeeperContainer = analyzerContext.getBestKeeperContainer(backUpKeeperContainer, dcClusterShard, activeKeeperContainer, (Boolean) cause[1]);
+            if (bestKeeperContainer == null) {
+                break;
+            }
+            if (!filterChain.isKeeperPairOverload(dcClusterShard, backUpKeeperContainer, bestKeeperContainer, analyzerContext)) {
+                analyzerContext.addMigrationPlan(backUpKeeperContainer, bestKeeperContainer, false, true, (String) cause[0], dcClusterShard, activeKeeperContainer);
+                analyzerContext.recycleKeeperContainer(bestKeeperContainer, (Boolean) cause[1]);
+            }
+        }
+        if (filterChain.isKeeperContainerPairOverload(modelA, modelB, analyzerContext.getIPPairData(modelA.getKeeperIp(), modelB.getKeeperIp()))) {
+            logger.warn("[analyzeKeeperContainerUsedInfo] no available space for overload keeperContainer pair to migrate {} {}", modelA, modelB);
+            CatEventMonitor.DEFAULT.logEvent(KEEPER_PAIR_RESOURCE_LACK, "Dc:" + currentDc + " OrgA:" + modelA.getOrg() + " AzA:" + modelA.getAz() + " OrgB:" + modelB.getOrg() + " AzB:" + modelB.getAz());
+            analyzerContext.addResourceLackPlan(modelA, modelB.getKeeperIp(), KeeperContainerOverloadCause.PAIR_RESOURCE_LACK.name());
+        }
+    }
+
+    private List<Map.Entry<DcClusterShardKeeper, KeeperUsedInfo>> getDescShards(Map<DcClusterShardKeeper, KeeperUsedInfo> allDetailInfo, boolean isPeerDataOverload) {
+        Comparator<Map.Entry<DcClusterShardKeeper, KeeperUsedInfo>> comparator = isPeerDataOverload ?
+                Comparator.comparingLong(e -> e.getValue().getPeerData()) : Comparator.comparingLong(e -> e.getValue().getInputFlow());
+        return allDetailInfo.entrySet().stream()
+                .sorted(comparator.reversed())
+                .collect(Collectors.toList());
+    }
+
+    private boolean canSwitchMaster(KeeperContainerUsedInfoModel src, KeeperContainerUsedInfoModel backUp, Map.Entry<DcClusterShardKeeper, KeeperUsedInfo> dcClusterShard) {
+        return backUp != null
+                && !analyzerContext.isProblemKeeperContainer(backUp.getKeeperIp())
+                && (src.getOrg() == null || src.getOrg().equals(backUp.getOrg()))
+                && (src.getAz() == null || src.getAz().equals(backUp.getAz()))
+                && filterChain.isKeeperContainerUseful(backUp)
+                && getPairOverloadCause(src, backUp) == null
+                && filterChain.canMigrate(dcClusterShard, backUp, backUp, analyzerContext);
+    }
+
+    private Map<DcClusterShardKeeper, KeeperUsedInfo> getAllDetailInfo(KeeperContainerUsedInfoModel modelA, KeeperContainerUsedInfoModel modelB) {
+        if (modelA.getDetailInfo() == null) {
+            return modelB.getDetailInfo();
+        }
+        if (modelB.getDetailInfo() == null) {
+            return modelA.getDetailInfo();
+        }
+        Map<DcClusterShardKeeper, KeeperUsedInfo> detailInfo = modelA.getDetailInfo();
+        detailInfo.putAll(modelB.getDetailInfo());
+        return detailInfo;
+    }
+
+    private Object[] getDataOverloadCause(KeeperContainerUsedInfoModel infoModel) {
+        long overloadInputFlow = infoModel.getActiveInputFlow() - infoModel.getInputFlowStandard();
+        long overloadPeerData = infoModel.getActiveRedisUsedMemory() - infoModel.getRedisUsedMemoryStandard();
+        if (overloadInputFlow <= 0 && overloadPeerData <= 0) {
+            return null;
+        } else if (overloadPeerData > 0 && overloadInputFlow >= overloadPeerData) {
+            return new Object[] {KeeperContainerOverloadCause.BOTH.name(), false};
+        } else if (overloadInputFlow > 0 && overloadInputFlow < overloadPeerData) {
+            return new Object[] {KeeperContainerOverloadCause.BOTH.name(), true};
+        } else if (overloadInputFlow > 0) {
+            return new Object[] {KeeperContainerOverloadCause.INPUT_FLOW_OVERLOAD.name(), false};
+        } else {
+            return new Object[] {KeeperContainerOverloadCause.PEER_DATA_OVERLOAD.name(), true};
+        }
+    }
+
+    private Object[] getPairOverloadCause(KeeperContainerUsedInfoModel pairA, KeeperContainerUsedInfoModel pairB) {
+        double keeperPairOverLoadFactor = config.getKeeperPairOverLoadFactor();
+        KeeperContainerOverloadStandardModel minStandardModel = new KeeperContainerOverloadStandardModel()
+                .setFlowOverload((long) (Math.min(pairB.getInputFlowStandard(), pairA.getInputFlowStandard()) * keeperPairOverLoadFactor))
+                .setPeerDataOverload((long) (Math.min(pairB.getRedisUsedMemoryStandard(), pairA.getRedisUsedMemoryStandard()) * keeperPairOverLoadFactor));
+
+        IPPairData longLongPair = analyzerContext.getIPPairData(pairA.getKeeperIp(), pairB.getKeeperIp());
+        long overloadInputFlow = longLongPair.getInputFlow() - minStandardModel.getFlowOverload();
+        long overloadPeerData = longLongPair.getPeerData() - minStandardModel.getPeerDataOverload();
+        if (overloadInputFlow <= 0 && overloadPeerData <= 0) {
+            return null;
+        } else if (overloadPeerData > 0 && overloadInputFlow >= overloadPeerData) {
+            return new Object[] {KeeperContainerOverloadCause.KEEPER_PAIR_BOTH.name(), false};
+        } else if (overloadInputFlow > 0 && overloadInputFlow < overloadPeerData) {
+            return new Object[] {KeeperContainerOverloadCause.KEEPER_PAIR_BOTH.name(), true};
+        } else if (overloadInputFlow > 0) {
+            return new Object[] {KeeperContainerOverloadCause.KEEPER_PAIR_INPUT_FLOW_OVERLOAD.name(), false};
+        } else {
+            return new Object[] {KeeperContainerOverloadCause.KEEPER_PAIR_PEER_DATA_OVERLOAD.name(), true};
+        }
+    }
+
+
@VisibleForTesting + public void setKeeperContainerAnalyzerService(KeeperContainerAnalyzerService keeperContainerAnalyzerService) { + this.keeperContainerAnalyzerService = keeperContainerAnalyzerService; + } + + @VisibleForTesting + public void setFilterChain(KeeperContainerFilterChain filterChain) { + this.filterChain = filterChain; + } + + @VisibleForTesting + public void setMetaCache(MetaCache metaCache) { + this.metaCache = metaCache; + } +} diff --git a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/impl/DefaultKeeperContainerUsedInfoAnalyzer.java b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/impl/DefaultKeeperContainerUsedInfoAnalyzer.java index 15423689f..b135594ce 100644 --- a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/impl/DefaultKeeperContainerUsedInfoAnalyzer.java +++ b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/impl/DefaultKeeperContainerUsedInfoAnalyzer.java @@ -45,9 +45,7 @@ public class DefaultKeeperContainerUsedInfoAnalyzer extends AbstractService impl private ConsoleConfig config; - private KeeperContainerAnalyzerService keeperContainerAnalyzerService; - - private KeeperContainerFilterChain keeperContainerFilterChain; + private KeeperContainerMigrationAnalyzer migrationAnalyzer; @Resource(name = AbstractSpringConfigContext.GLOBAL_EXECUTOR) private Executor executors; @@ -60,24 +58,51 @@ public class DefaultKeeperContainerUsedInfoAnalyzer extends AbstractService impl private long currentDcMaxKeeperContainerActiveRedisUsedMemory; - private PriorityQueue minInputFlowKeeperContainers; - - private PriorityQueue minPeerDataKeeperContainers; - - private KeeperContainerUsedInfoAnalyzerContext analyzerContext = new DefaultKeeperContainerUsedInfoAnalyzerContext(); - private static final String currentDc = FoundationService.DEFAULT.getDataCenter().toUpperCase(); - private static final String KEEPER_RESOURCE_LACK = "keeper_resource_lack"; - public DefaultKeeperContainerUsedInfoAnalyzer() {} public DefaultKeeperContainerUsedInfoAnalyzer(ConsoleConfig config, - KeeperContainerAnalyzerService keeperContainerAnalyzerService, - KeeperContainerFilterChain keeperContainerFilterChain) { + KeeperContainerMigrationAnalyzer migrationAnalyzer) { this.config = config; - this.keeperContainerAnalyzerService = keeperContainerAnalyzerService; - this.keeperContainerFilterChain = keeperContainerFilterChain; + this.migrationAnalyzer = migrationAnalyzer; + } + + @Override + public synchronized void updateKeeperContainerUsedInfo(int index, List keeperContainerUsedInfoModels) { + if (keeperContainerUsedInfoModels != null && !keeperContainerUsedInfoModels.isEmpty()){ + keeperContainerUsedInfoModelIndexMap.put(index, new Pair<>(keeperContainerUsedInfoModels, new Date())); + } + removeExpireData(); + logger.info("[analyzeKeeperContainerUsedInfo] current index {}", keeperContainerUsedInfoModelIndexMap.keySet()); + if (keeperContainerUsedInfoModelIndexMap.size() != config.getClusterDividedParts()) return; + + currentDcAllKeeperContainerUsedInfoModelMap.clear(); + keeperContainerUsedInfoModelIndexMap.values().forEach(list -> list.getKey().forEach(infoModel -> currentDcAllKeeperContainerUsedInfoModelMap.put(infoModel.getKeeperIp(), + infoModel.setUpdateTime(new Date(System.currentTimeMillis() + 8 * 60 * 60 * 1000))))); + + keeperContainerUsedInfoModelIndexMap.clear(); + logger.info("[analyzeKeeperContainerUsedInfo] start analyze allKeeperContainerUsedInfoModelsList"); + if 
(currentDcAllKeeperContainerUsedInfoModelMap.isEmpty()) return; + executors.execute(new AbstractExceptionLogTask() { + @Override + protected void doRun() throws Exception { + TransactionMonitor transaction = TransactionMonitor.DEFAULT; + transaction.logTransactionSwallowException("keeperContainer.analyze", currentDc, new Task() { + @Override + public void go() throws Exception { + currentDcKeeperContainerMigrationResult = migrationAnalyzer.getMigrationPlans(currentDcAllKeeperContainerUsedInfoModelMap); + } + + @Override + public Map getData() { + Map transactionData = new HashMap<>(); + transactionData.put("keeperContainerSize", currentDcAllKeeperContainerUsedInfoModelMap.size()); + return transactionData; + } + }); + } + }); } @Override @@ -116,12 +141,21 @@ public List getCurrentDcMaxKeeperContainerFullSynchronizationTime() { List result = new ArrayList<>(); double keeperContainerIoRate = config.getKeeperContainerIoRate(); if (!currentDcAllKeeperContainerUsedInfoModelMap.isEmpty()) { - currentDcMaxKeeperContainerActiveRedisUsedMemory = analyzerContext.getMaxActiveRedisUsedMemory(currentDcAllKeeperContainerUsedInfoModelMap); + currentDcMaxKeeperContainerActiveRedisUsedMemory = getMaxActiveRedisUsedMemory(currentDcAllKeeperContainerUsedInfoModelMap); } result.add((int) (currentDcMaxKeeperContainerActiveRedisUsedMemory /1024/1024/keeperContainerIoRate/60)); return result; } + private long getMaxActiveRedisUsedMemory(Map usedInfo) { + long max = 0; + for (KeeperContainerUsedInfoModel usedInfoModel : usedInfo.values()) { + max = Math.max(max, usedInfoModel.getActiveRedisUsedMemory()); + } + return max; + } + + public List getAllDcResult(Supplier> localDcResultSupplier, AbstractGetAllDcCommand> command, List result) { logger.info("[getAllDcResult] {} start", command.getName()); ParallelCommandChain commandChain = new ParallelCommandChain(MoreExecutors.directExecutor(), false); @@ -150,42 +184,6 @@ public List getAllDcResult(Supplier> localDcResultSupplier, Abstr return result; } - @Override - public synchronized void updateKeeperContainerUsedInfo(int index, List keeperContainerUsedInfoModels) { - if (keeperContainerUsedInfoModels != null && !keeperContainerUsedInfoModels.isEmpty()){ - keeperContainerUsedInfoModelIndexMap.put(index, new Pair<>(keeperContainerUsedInfoModels, new Date())); - } - removeExpireData(); - logger.info("[analyzeKeeperContainerUsedInfo] current index {}", keeperContainerUsedInfoModelIndexMap.keySet()); - if (keeperContainerUsedInfoModelIndexMap.size() != config.getClusterDividedParts()) return; - - currentDcAllKeeperContainerUsedInfoModelMap.clear(); - keeperContainerUsedInfoModelIndexMap.values().forEach(list -> list.getKey().forEach(infoModel -> currentDcAllKeeperContainerUsedInfoModelMap.put(infoModel.getKeeperIp(), infoModel))); - - keeperContainerUsedInfoModelIndexMap.clear(); - logger.info("[analyzeKeeperContainerUsedInfo] start analyze allKeeperContainerUsedInfoModelsList"); - if (currentDcAllKeeperContainerUsedInfoModelMap.isEmpty()) return; - executors.execute(new AbstractExceptionLogTask() { - @Override - protected void doRun() throws Exception { - TransactionMonitor transaction = TransactionMonitor.DEFAULT; - transaction.logTransactionSwallowException("keeperContainer.analyze", currentDc, new Task() { - @Override - public void go() throws Exception { -// analyzeKeeperContainerUsedInfo(); - } - - @Override - public Map getData() { - Map transactionData = new HashMap<>(); - transactionData.put("keeperContainerSize", 
currentDcAllKeeperContainerUsedInfoModelMap.size()); - return transactionData; - } - }); - } - }); - } - private void removeExpireData() { List expireIndex = new ArrayList<>(); for (Map.Entry, Date>> entry : keeperContainerUsedInfoModelIndexMap.entrySet()) { @@ -199,285 +197,14 @@ private void removeExpireData() { } } - @VisibleForTesting - void analyzeKeeperContainerUsedInfo() { - logger.info("[analyzeKeeperContainerUsedInfo] start, keeperContainer number {}", currentDcAllKeeperContainerUsedInfoModelMap.size()); - if(!analyzerContext.initKeeperPairData(currentDcAllKeeperContainerUsedInfoModelMap)) return; - keeperContainerAnalyzerService.initStandard(currentDcAllKeeperContainerUsedInfoModelMap); - generateAllSortedDescKeeperContainerUsedInfoModelQueue(); - List result = new ArrayList<>(); - for (KeeperContainerUsedInfoModel infoModel : currentDcAllKeeperContainerUsedInfoModelMap.values()) { - List migrationKeeperDetails = getOverloadKeeperMigrationDetails(infoModel); - if (migrationKeeperDetails != null) - result.addAll(migrationKeeperDetails); - for (String ip : analyzerContext.getAllPairsIP(infoModel.getKeeperIp())) { - List keeperPairMigrationKeeperDetails = getKeeperPairMigrationKeeperDetails(infoModel, currentDcAllKeeperContainerUsedInfoModelMap.get(ip)); - if (keeperPairMigrationKeeperDetails != null) - result.addAll(keeperPairMigrationKeeperDetails); - } - } - currentDcKeeperContainerMigrationResult = result; - } - - private void generateAllSortedDescKeeperContainerUsedInfoModelQueue() { - minInputFlowKeeperContainers = new PriorityQueue<>(currentDcAllKeeperContainerUsedInfoModelMap.values().size(), - (keeper1, keeper2) -> (int)(keeper1.getActiveInputFlow() - keeper2.getActiveInputFlow())); - minPeerDataKeeperContainers = new PriorityQueue<>(currentDcAllKeeperContainerUsedInfoModelMap.values().size(), - (keeper1, keeper2) -> (int)(keeper1.getTotalRedisUsedMemory() - keeper2.getTotalRedisUsedMemory())); - currentDcAllKeeperContainerUsedInfoModelMap.values().forEach(keeperContainerInfoModel -> { - if (keeperContainerFilterChain.doKeeperContainerFilter(keeperContainerInfoModel)) { - minPeerDataKeeperContainers.add(KeeperContainerUsedInfoModel.cloneKeeperContainerUsedInfoModel(keeperContainerInfoModel)); - minInputFlowKeeperContainers.add(KeeperContainerUsedInfoModel.cloneKeeperContainerUsedInfoModel(keeperContainerInfoModel)); - } - }); - } - - - private List getOverloadKeeperMigrationDetails(KeeperContainerUsedInfoModel infoModel) { - long overloadInputFlow = infoModel.getActiveInputFlow() - infoModel.getInputFlowStandard(); - long overloadPeerData = infoModel.getActiveRedisUsedMemory() - infoModel.getRedisUsedMemoryStandard(); - KeeperContainerOverloadCause overloadCause = getKeeperContainerOverloadCause(overloadInputFlow, overloadPeerData); - if (overloadCause == null) return null; - - switch (overloadCause) { - case PEER_DATA_OVERLOAD: - return getOverloadKeeperMigrationDetails(infoModel, true, overloadPeerData, minPeerDataKeeperContainers, overloadCause); - case INPUT_FLOW_OVERLOAD: - case BOTH: - return getOverloadKeeperMigrationDetails(infoModel, false, overloadInputFlow, minInputFlowKeeperContainers, overloadCause); - default: - logger.warn("invalid keeper container overload cause {}", overloadCause); - return null; - } - } - - private List getOverloadKeeperMigrationDetails(KeeperContainerUsedInfoModel src, - boolean isPeerDataOverload, - long overloadData, - PriorityQueue availableKeeperContainers, - KeeperContainerOverloadCause overloadCause) { - 
logger.info("[analyzeKeeperContainerUsedInfo] srcIp: {}, overloadCause:{}, overloadData:{}", src.getKeeperIp(), overloadCause.name(), overloadData); - PriorityQueue anotherQueue = isPeerDataOverload ? minInputFlowKeeperContainers : minPeerDataKeeperContainers; - List> allDescDcClusterShards = getDescDcClusterShardDetails(src.getDetailInfo(), isPeerDataOverload); - List result = new ArrayList<>(); - KeeperContainerUsedInfoModel target = null; - MigrationKeeperContainerDetailModel keeperContainerMigrationDetail = null; - MigrationKeeperContainerDetailModel switchActiveMigrationDetail = new MigrationKeeperContainerDetailModel(src, null, 0, true, false, overloadCause.name(), new ArrayList<>()); - for (Map.Entry dcClusterShard : allDescDcClusterShards) { - if (overloadData <= 0) break; - if (!dcClusterShard.getKey().isActive()) continue; - String backUpKeeperIP = analyzerContext.getBackUpKeeperIp(dcClusterShard.getKey()); - if (backUpKeeperIP == null) { - throw new RuntimeException("backUpKeeperIP is null, dcClusterShard: " + dcClusterShard.getKey()); - } - KeeperContainerUsedInfoModel backUpKeeper = currentDcAllKeeperContainerUsedInfoModelMap.get(backUpKeeperIP); - if (keeperContainerFilterChain.doKeeperContainerFilter(backUpKeeper) && keeperContainerFilterChain.doKeeperFilter(dcClusterShard, src, backUpKeeper, analyzerContext)) { - switchActiveMigrationDetail.addReadyToMigrateShard(dcClusterShard.getKey()); - updateAllSortedDescKeeperContainerUsedInfoModelQueue(backUpKeeper.getKeeperIp(), new KeeperContainerUsedInfoModel(backUpKeeper, dcClusterShard)); - overloadData = updateOverLoadData(isPeerDataOverload, overloadData, dcClusterShard.getValue()); - continue; - } - if (target == null ) { - target = availableKeeperContainers.poll(); - if (target == null) { - logger.warn("[analyzeKeeperContainerUsedInfo] no available keeper containers {} for overload keeper container {}", availableKeeperContainers, src); - CatEventMonitor.DEFAULT.logEvent(KEEPER_RESOURCE_LACK, currentDc); - generateResult(result, keeperContainerMigrationDetail, switchActiveMigrationDetail); -// 資源不足case - return result; - } - keeperContainerMigrationDetail = new MigrationKeeperContainerDetailModel(src, target, 0, false, false, overloadCause.name(), new ArrayList<>()); - } - if (!keeperContainerFilterChain.doKeeperFilter(dcClusterShard, src, target, analyzerContext) || - !keeperContainerFilterChain.doKeeperPairFilter(dcClusterShard, backUpKeeper, target, analyzerContext)) { - updateSortedDescKeeperContainerUsedInfoModelQueue(anotherQueue, target.getKeeperIp(), target); - target = null; - if (keeperContainerMigrationDetail.getMigrateKeeperCount() != 0) result.add(keeperContainerMigrationDetail); - continue; - } - keeperContainerMigrationDetail.addReadyToMigrateShard(dcClusterShard.getKey()); - target.setActiveInputFlow(target.getActiveInputFlow() + dcClusterShard.getValue().getInputFlow()).setActiveRedisUsedMemory(target.getActiveRedisUsedMemory() + dcClusterShard.getValue().getPeerData()); - analyzerContext.updateMigrateIpPair(src.getKeeperIp(), backUpKeeper.getKeeperIp(), target.getKeeperIp(), dcClusterShard); - overloadData = updateOverLoadData(isPeerDataOverload, overloadData, dcClusterShard.getValue()); - } - - generateResult(result, keeperContainerMigrationDetail, switchActiveMigrationDetail); - - if (target != null && target.getActiveInputFlow() < src.getInputFlowStandard() && target.getTotalRedisUsedMemory() < src.getRedisUsedMemoryStandard()) { - availableKeeperContainers.add(target); - 
updateSortedDescKeeperContainerUsedInfoModelQueue(anotherQueue, target.getKeeperIp(), target); - } - return result; - } - - private List getKeeperPairMigrationKeeperDetails(KeeperContainerUsedInfoModel pairA, KeeperContainerUsedInfoModel pairB){ - double keeperPairOverLoadFactor = config.getKeeperPairOverLoadFactor(); - KeeperContainerOverloadStandardModel minStandardModel = new KeeperContainerOverloadStandardModel() - .setFlowOverload((long) (Math.min(pairB.getInputFlowStandard(), pairA.getInputFlowStandard()) * keeperPairOverLoadFactor)) - .setPeerDataOverload((long) (Math.min(pairB.getRedisUsedMemoryStandard(), pairA.getRedisUsedMemoryStandard()) * keeperPairOverLoadFactor)); - - IPPairData longLongPair = analyzerContext.getIPPairData(pairA.getKeeperIp(), pairB.getKeeperIp()); - long overloadInputFlow = longLongPair.getInputFlow() - minStandardModel.getFlowOverload(); - long overloadPeerData = longLongPair.getPeerData() - minStandardModel.getPeerDataOverload(); - KeeperContainerOverloadCause overloadCause = getKeeperPairOverloadCause(overloadInputFlow, overloadPeerData); - if (overloadCause == null) return null; - - switch (overloadCause) { - case KEEPER_PAIR_PEER_DATA_OVERLOAD: - return getKeeperPairMigrationDetails(pairA, pairB, true, overloadPeerData, minPeerDataKeeperContainers, overloadCause); - case KEEPER_PAIR_INPUT_FLOW_OVERLOAD: - case KEEPER_PAIR_BOTH: - return getKeeperPairMigrationDetails(pairA, pairB, false, overloadInputFlow, minInputFlowKeeperContainers, overloadCause); - default: - logger.warn("invalid keeper container overload cause {}", overloadCause); - return null; - } - - } - - private KeeperContainerOverloadCause getKeeperPairOverloadCause(long overloadInputFlow, long overloadPeerData) { - if (overloadInputFlow <= 0 && overloadPeerData <= 0) { - return null; - } else if (overloadInputFlow > 0 && overloadPeerData > 0) { - return KeeperContainerOverloadCause.KEEPER_PAIR_BOTH; - } else if (overloadInputFlow > 0) { - return KeeperContainerOverloadCause.KEEPER_PAIR_INPUT_FLOW_OVERLOAD; - } else { - return KeeperContainerOverloadCause.KEEPER_PAIR_PEER_DATA_OVERLOAD; - } - } - - private List getKeeperPairMigrationDetails(KeeperContainerUsedInfoModel pairA, - KeeperContainerUsedInfoModel pairB, - boolean isPeerDataOverload, - long overloadData, - PriorityQueue availableKeeperContainers, - KeeperContainerOverloadCause overloadCause){ - List> allDcClusterShards = getDescDcClusterShardDetails(analyzerContext.getAllDetailInfo(pairA.getKeeperIp(), pairB.getKeeperIp()), isPeerDataOverload); - List result = new ArrayList<>(); - KeeperContainerUsedInfoModel target = null; - List usedTarget = new ArrayList<>(); - MigrationKeeperContainerDetailModel keeperContainerDetailModel = null; - logger.debug("[analyzeKeeperPairOverLoad] pairA: {}, pairB: {}, overloadCause:{}, overloadData:{}, availableKeeperContainers:{} ", pairA, pairB, isPeerDataOverload, overloadData, availableKeeperContainers); - for (Map.Entry dcClusterShard : allDcClusterShards) { - if (overloadData <= 0) break; - if (!dcClusterShard.getKey().isActive()) continue; - String srcKeeperIp = dcClusterShard.getValue().getKeeperIP(); - KeeperContainerUsedInfoModel srcKeeperContainer = srcKeeperIp.equals(pairA.getKeeperIp()) ? pairA : pairB; - KeeperContainerUsedInfoModel backUpKeeperContainer = srcKeeperIp.equals(pairA.getKeeperIp()) ? 
pairB : pairA; - while (target == null) { - target = availableKeeperContainers.poll(); - if (target == null) { - logger.warn("[analyzeKeeperContainerUsedInfo] no available keeper containers {} for overload keeper pair {},{}", availableKeeperContainers, pairA, pairB); - CatEventMonitor.DEFAULT.logEvent(KEEPER_RESOURCE_LACK, currentDc); - availableKeeperContainers.addAll(usedTarget); - return result; - } - if (target.getKeeperIp().equals(pairA.getKeeperIp()) || target.getKeeperIp().equals(pairB.getKeeperIp())) { - usedTarget.add(target); - target = availableKeeperContainers.poll(); - } - keeperContainerDetailModel = new MigrationKeeperContainerDetailModel(srcKeeperContainer, target, 0, false, true, overloadCause.name(), new ArrayList<>()); - } - if (!keeperContainerFilterChain.doKeeperPairFilter(dcClusterShard, srcKeeperContainer, target, analyzerContext)) { - usedTarget.add(target); - target = null; - generateResult(result, keeperContainerDetailModel); - continue; - } - keeperContainerDetailModel.addReadyToMigrateShard(dcClusterShard.getKey()); - analyzerContext.updateMigrateIpPair(srcKeeperContainer.getKeeperIp(), backUpKeeperContainer.getKeeperIp(), target.getKeeperIp(), dcClusterShard); - overloadData = updateOverLoadData(isPeerDataOverload, overloadData, dcClusterShard.getValue()); - } - - if (target != null) { - usedTarget.add(target); - } - - generateResult(result, keeperContainerDetailModel); - availableKeeperContainers.addAll(usedTarget); - - return result; - } - - - - private KeeperContainerOverloadCause getKeeperContainerOverloadCause(long overloadInputFlow, long overloadPeerData) { - if (overloadInputFlow <= 0 && overloadPeerData <= 0) { - return null; - } else if (overloadInputFlow > 0 && overloadPeerData > 0) { - return KeeperContainerOverloadCause.BOTH; - } else if (overloadInputFlow > 0) { - return KeeperContainerOverloadCause.INPUT_FLOW_OVERLOAD; - } else { - return KeeperContainerOverloadCause.PEER_DATA_OVERLOAD; - } - } - - private List> getDescDcClusterShardDetails(Map allDetailInfo, boolean isPeerDataOverload) { - Comparator> comparator = isPeerDataOverload ? - Comparator.comparingLong(e -> e.getValue().getPeerData()) : Comparator.comparingLong(e -> e.getValue().getInputFlow()); - return allDetailInfo.entrySet().stream() - .sorted(comparator.reversed()) - .collect(Collectors.toList()); - } - - private void updateAllSortedDescKeeperContainerUsedInfoModelQueue(String keeperIp, KeeperContainerUsedInfoModel newModel) { - updateSortedDescKeeperContainerUsedInfoModelQueue(minInputFlowKeeperContainers, keeperIp, newModel); - updateSortedDescKeeperContainerUsedInfoModelQueue(minPeerDataKeeperContainers, keeperIp, newModel); - } - - private void updateSortedDescKeeperContainerUsedInfoModelQueue(PriorityQueue queue, String keeperIp, KeeperContainerUsedInfoModel newModel) { - Queue temp = new LinkedList<>(); - while (!queue.isEmpty()){ - KeeperContainerUsedInfoModel infoModel = queue.poll(); - if (infoModel.getKeeperIp().equals(keeperIp)) { - temp.add(newModel); - break; - } - temp.add(infoModel); - } - while(!temp.isEmpty()) { - queue.add(temp.poll()); - } - } - - private long updateOverLoadData(boolean isPeerDataOverload, long overloadData, KeeperUsedInfo usedInfo) { - long currentOverLoadData = isPeerDataOverload ? usedInfo.getPeerData() : usedInfo.getInputFlow(); - return overloadData - currentOverLoadData; - } - - private void generateResult(List result, MigrationKeeperContainerDetailModel... 
detailModel) { - for (MigrationKeeperContainerDetailModel model : detailModel) { - if (model != null && model.getMigrateKeeperCount() != 0) { - result.add(model); - } - } - } - @VisibleForTesting int getCheckerIndexesSize() { return keeperContainerUsedInfoModelIndexMap.size(); } - @VisibleForTesting - void setKeeperContainerAnalyzerService(KeeperContainerAnalyzerService keeperContainerAnalyzerService) { - this.keeperContainerAnalyzerService = keeperContainerAnalyzerService; - } - @VisibleForTesting void setExecutors(Executor executors){ this.executors = executors; } - @VisibleForTesting - void setKeeperContainerFilterChain(KeeperContainerFilterChain keeperContainerFilterChain){ - this.keeperContainerFilterChain = keeperContainerFilterChain; - } - - @VisibleForTesting - Map getCurrentDcKeeperContainerUsedInfoModelsMap() { - return currentDcAllKeeperContainerUsedInfoModelMap; - } - } diff --git a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/impl/KeeperContainerMigrationAnalyzer.java b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/impl/KeeperContainerMigrationAnalyzer.java new file mode 100644 index 000000000..37d908f01 --- /dev/null +++ b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/impl/KeeperContainerMigrationAnalyzer.java @@ -0,0 +1,13 @@ +package com.ctrip.xpipe.redis.console.keeper.impl; + +import com.ctrip.xpipe.redis.checker.model.KeeperContainerUsedInfoModel; +import com.ctrip.xpipe.redis.console.model.MigrationKeeperContainerDetailModel; + +import java.util.List; +import java.util.Map; + +public interface KeeperContainerMigrationAnalyzer { + + List getMigrationPlans(Map modelsMap); + +} diff --git a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/util/DefaultKeeperContainerUsedInfoAnalyzerContext.java b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/util/DefaultKeeperContainerUsedInfoAnalyzerContext.java index f82623c59..ce7ae1f54 100644 --- a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/util/DefaultKeeperContainerUsedInfoAnalyzerContext.java +++ b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/util/DefaultKeeperContainerUsedInfoAnalyzerContext.java @@ -6,33 +6,147 @@ import com.ctrip.xpipe.redis.checker.model.KeeperContainerUsedInfoModel; import com.ctrip.xpipe.redis.checker.model.KeeperContainerUsedInfoModel.*; import com.ctrip.xpipe.redis.console.keeper.entity.IPPairData; +import com.ctrip.xpipe.redis.console.keeper.handler.KeeperContainerFilterChain; +import com.ctrip.xpipe.redis.console.model.MigrationKeeperContainerDetailModel; +import com.ctrip.xpipe.redis.core.entity.DcMeta; +import com.ctrip.xpipe.redis.core.entity.KeeperMeta; +import com.ctrip.xpipe.redis.core.meta.MetaCache; +import com.ctrip.xpipe.utils.StringUtil; +import org.apache.commons.lang.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.*; +import java.util.stream.Collectors; public class DefaultKeeperContainerUsedInfoAnalyzerContext implements KeeperContainerUsedInfoAnalyzerContext { private static final Logger logger = LoggerFactory.getLogger(DefaultKeeperContainerUsedInfoAnalyzerContext.class); private Map> ipPairMap = new HashMap<>(); private Map allDetailInfo = new HashMap<>(); + + private PriorityQueue minInputFlowKeeperContainers; + + private PriorityQueue minPeerDataKeeperContainers; + + private Map> migrationPlansMap = new HashMap<>(); + + private Set problemKeeperContainer = new HashSet<>(); + + 
private Map srcModelsMap; + + private KeeperContainerFilterChain filterChain; + + private DcMeta dcMeta; + private static final String KEEPER_NO_BACKUP = "keeper_no_backup"; - public DefaultKeeperContainerUsedInfoAnalyzerContext() {} + public DefaultKeeperContainerUsedInfoAnalyzerContext(KeeperContainerFilterChain filterChain, DcMeta dcMeta) { + this.filterChain = filterChain; + this.dcMeta = dcMeta; + } + + @Override + public void addMigrationPlan(KeeperContainerUsedInfoModel src, KeeperContainerUsedInfoModel target, boolean switchActive, boolean keeperPairOverload, String cause, Map.Entry dcClusterShard, KeeperContainerUsedInfoModel srcPair) { + if (!migrationPlansMap.containsKey(src.getKeeperIp())) { + migrationPlansMap.put(src.getKeeperIp(), new ArrayList<>()); + } + if (migrationPlansMap.get(src.getKeeperIp()).stream().noneMatch(list -> list.isKeeperPairOverload() == keeperPairOverload && list.isSwitchActive() == switchActive && list.getTargetKeeperContainer() != null && target.getKeeperIp().equals(list.getTargetKeeperContainer().getKeeperIp()) && StringUtils.equals(cause, list.getCause()))) { + MigrationKeeperContainerDetailModel model = new MigrationKeeperContainerDetailModel( + srcModelsMap.get(src.getKeeperIp()), srcModelsMap.get(target.getKeeperIp()), switchActive, keeperPairOverload, cause, new ArrayList<>()); + if (keeperPairOverload) { + model.setSrcOverLoadKeeperPairIp(srcPair.getKeeperIp()); + } + migrationPlansMap.get(src.getKeeperIp()).add(model); + } + MigrationKeeperContainerDetailModel model = migrationPlansMap.get(src.getKeeperIp()).stream().filter(list -> list.isKeeperPairOverload() == keeperPairOverload && list.isSwitchActive() == switchActive && list.getTargetKeeperContainer() != null && target.getKeeperIp().equals(list.getTargetKeeperContainer().getKeeperIp()) && StringUtils.equals(cause, list.getCause())).findFirst().get(); + model.addReadyToMigrateShard(dcClusterShard.getKey()); + + if (!keeperPairOverload) { + src.setActiveInputFlow(src.getActiveInputFlow() - dcClusterShard.getValue().getInputFlow()).setActiveRedisUsedMemory(src.getActiveRedisUsedMemory() - dcClusterShard.getValue().getPeerData()); + target.setActiveInputFlow(target.getActiveInputFlow() + dcClusterShard.getValue().getInputFlow()).setActiveRedisUsedMemory(target.getActiveRedisUsedMemory() + dcClusterShard.getValue().getPeerData()); + } + if (!switchActive) { + updateMigrateIpPair(src.getKeeperIp(), srcPair.getKeeperIp(), target.getKeeperIp(), dcClusterShard); + } + } + + @Override + public List getAllMigrationPlans() { + return migrationPlansMap.values().stream().flatMap(Collection::stream).collect(Collectors.toList()); + } @Override - public long getMaxActiveRedisUsedMemory(Map usedInfo) { - long max = 0; - for (KeeperContainerUsedInfoModel usedInfoModel : usedInfo.values()) { - max = Math.max(max, usedInfoModel.getActiveRedisUsedMemory()); + public void addResourceLackPlan(KeeperContainerUsedInfoModel src, String srcOverLoadKeeperPairIp, String cause) { + if (!migrationPlansMap.containsKey(src.getKeeperIp())) { + migrationPlansMap.put(src.getKeeperIp(), new ArrayList<>()); } - return max; + MigrationKeeperContainerDetailModel model = new MigrationKeeperContainerDetailModel(srcModelsMap.get(src.getKeeperIp()), null, false, false, cause, new ArrayList<>()) + .setUpdateTime(new Date(System.currentTimeMillis() + 8 * 60 * 60 * 1000)); + if (srcOverLoadKeeperPairIp != null) { + model.setSrcOverLoadKeeperPairIp(srcOverLoadKeeperPairIp); + } + migrationPlansMap.get(src.getKeeperIp()).add(model); } 
@Override - public boolean initKeeperPairData(Map usedInfoMap) { + public boolean isProblemKeeperContainer(String keeperContainerIp) { + return problemKeeperContainer.contains(keeperContainerIp); + } + + @Override + public void initAvailablePool(List usedInfoMap) { + minInputFlowKeeperContainers = new PriorityQueue<>(usedInfoMap.size(), + (keeper1, keeper2) -> (int)((keeper2.getInputFlowStandard() - keeper2.getActiveInputFlow()) - (keeper1.getInputFlowStandard() - keeper1.getActiveInputFlow()))); + minPeerDataKeeperContainers = new PriorityQueue<>(usedInfoMap.size(), + (keeper1, keeper2) -> (int)((keeper2.getRedisUsedMemoryStandard() - keeper2.getTotalRedisUsedMemory()) - (keeper1.getRedisUsedMemoryStandard() - keeper1.getTotalRedisUsedMemory()))); + usedInfoMap.forEach(model -> { + if (!problemKeeperContainer.contains(model.getKeeperIp()) + && filterChain.isKeeperContainerUseful(model)) { + minPeerDataKeeperContainers.add(model); + minInputFlowKeeperContainers.add(model); + } + }); + } + + @Override + public void recycleKeeperContainer(KeeperContainerUsedInfoModel keeperContainer, boolean isPeerDataOverload) { + if (keeperContainer == null || filterChain.isDataOverLoad(keeperContainer)) return; + PriorityQueue queue = isPeerDataOverload ? minPeerDataKeeperContainers : minInputFlowKeeperContainers; + PriorityQueue anotherQueue = isPeerDataOverload ? minInputFlowKeeperContainers : minPeerDataKeeperContainers; + queue.add(keeperContainer); + if (anotherQueue.remove(keeperContainer)) { + anotherQueue.add(keeperContainer); + } + } + + @Override + public KeeperContainerUsedInfoModel getBestKeeperContainer(KeeperContainerUsedInfoModel srcKeeper, Map.Entry dcClusterShard, KeeperContainerUsedInfoModel srcKeeperPair, boolean isPeerDataOverload) { + String org = srcKeeper.getOrg(); + String az = srcKeeper.getAz(); + PriorityQueue queue = isPeerDataOverload ? 
minPeerDataKeeperContainers : minInputFlowKeeperContainers; + Queue temp = new LinkedList<>(); + while (!queue.isEmpty()) { + KeeperContainerUsedInfoModel target = queue.poll(); + if ((org == null || org.equals(target.getOrg())) + && (az == null || az.equals(target.getAz())) + && !Objects.equals(target.getKeeperIp(), srcKeeperPair.getKeeperIp()) + && filterChain.canMigrate(dcClusterShard, srcKeeperPair, target, this) ) { + return target; + } + temp.add(target); + } + while(!temp.isEmpty()) { + queue.add(temp.poll()); + } + return null; + } + + @Override + public void initKeeperPairData(List usedInfoMap, Map srcModelsMap) { + this.srcModelsMap = srcModelsMap; ipPairMap.clear(); allDetailInfo.clear(); - for (KeeperContainerUsedInfoModel infoModel : usedInfoMap.values()) { + for (KeeperContainerUsedInfoModel infoModel : usedInfoMap) { if (infoModel.getDetailInfo() != null) { allDetailInfo.putAll(infoModel.getDetailInfo()); } @@ -41,10 +155,31 @@ public boolean initKeeperPairData(Map used if (!entry.getKey().isActive()) continue; KeeperUsedInfo activeKeeperUsedInfo = entry.getValue(); String backUpKeeperIp = getBackUpKeeperIp(entry.getKey()); - if (backUpKeeperIp == null) return false; + if (backUpKeeperIp == null) { + getProblemKeeperContainer(entry); + continue; + } addIpPair(activeKeeperUsedInfo.getKeeperIP(), backUpKeeperIp, entry); } - return true; + } + + private void getProblemKeeperContainer(Map.Entry entry) { + dcMeta.getClusters().values().forEach(clusterMeta -> { + if (entry.getKey().getClusterId().equals(clusterMeta.getId())) { + clusterMeta.getShards().values().forEach(shardMeta -> { + if (entry.getKey().getShardId().equals(shardMeta.getId())) { + List keepers = shardMeta.getKeepers(); + if (keepers.size() == 2) { + if (keepers.get(0).getIp().equals(entry.getValue().getKeeperIP())) { + problemKeeperContainer.add(keepers.get(1).getIp()); + } else { + problemKeeperContainer.add(keepers.get(0).getIp()); + } + } + } + }); + } + }); } @Override @@ -52,7 +187,6 @@ public String getBackUpKeeperIp(DcClusterShard activeKeeper) { KeeperUsedInfo backUpKeeperUsedInfo = allDetailInfo.get(new DcClusterShardKeeper(activeKeeper, false)); if (backUpKeeperUsedInfo == null) { CatEventMonitor.DEFAULT.logEvent(KEEPER_NO_BACKUP, activeKeeper.toString()); - logger.warn("[analyzeKeeperPair] active keeper {} has no backup keeper", activeKeeper); return null; } return backUpKeeperUsedInfo.getKeeperIP(); @@ -78,11 +212,6 @@ public IPPairData getIPPairData(String ip1, String ip2) { return null; } - @Override - public Map getAllDetailInfo(String ip1, String ip2) { - return ipPairMap.get(ip1).get(ip2).getKeeperUsedInfoMap(); - } - @Override public void updateMigrateIpPair(String srcKeeperIp, String srcKeeperIpPair, String targetKeeperIp, Map.Entry migrateDcClusterShard) { removeIpPair(srcKeeperIp, srcKeeperIpPair, migrateDcClusterShard); diff --git a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/util/KeeperContainerUsedInfoAnalyzerContext.java b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/util/KeeperContainerUsedInfoAnalyzerContext.java index 58d528ef5..8084704d7 100644 --- a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/util/KeeperContainerUsedInfoAnalyzerContext.java +++ b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/keeper/util/KeeperContainerUsedInfoAnalyzerContext.java @@ -5,13 +5,20 @@ import com.ctrip.xpipe.redis.checker.model.KeeperContainerUsedInfoModel; import 
com.ctrip.xpipe.redis.checker.model.KeeperContainerUsedInfoModel.*; import com.ctrip.xpipe.redis.console.keeper.entity.IPPairData; +import com.ctrip.xpipe.redis.console.model.MigrationKeeperContainerDetailModel; import java.util.List; import java.util.Map; public interface KeeperContainerUsedInfoAnalyzerContext { - boolean initKeeperPairData(Map usedInfoMap); + void initKeeperPairData(List usedInfoMap, Map srcModelsMap); + + void initAvailablePool(List usedInfoMap); + + void recycleKeeperContainer(KeeperContainerUsedInfoModel keeperContainer, boolean isPeerDataOverload); + + KeeperContainerUsedInfoModel getBestKeeperContainer(KeeperContainerUsedInfoModel usedInfoModel, Map.Entry dcClusterShard, KeeperContainerUsedInfoModel srcKeeperPair, boolean isPeerDataOverload); String getBackUpKeeperIp(DcClusterShard activeKeeper); @@ -19,10 +26,14 @@ public interface KeeperContainerUsedInfoAnalyzerContext { IPPairData getIPPairData(String ip1, String ip2); - Map getAllDetailInfo(String ip1, String ip2); - void updateMigrateIpPair(String srcKeeperIp, String srcKeeperIpPair, String targetKeeperIp, Map.Entry migrateDcClusterShard); - long getMaxActiveRedisUsedMemory(Map usedInfoMap); + void addMigrationPlan(KeeperContainerUsedInfoModel src, KeeperContainerUsedInfoModel target, boolean switchActive, boolean keeperPairOverload, String cause, Map.Entry dcClusterShard, KeeperContainerUsedInfoModel srcPair); + + List getAllMigrationPlans(); + + void addResourceLackPlan(KeeperContainerUsedInfoModel src, String srcOverLoadKeeperPairIp, String cause); + + boolean isProblemKeeperContainer(String keeperContainerIp); } diff --git a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/model/MigrationKeeperContainerDetailModel.java b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/model/MigrationKeeperContainerDetailModel.java index 9cbdb5e02..03a398ba6 100644 --- a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/model/MigrationKeeperContainerDetailModel.java +++ b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/model/MigrationKeeperContainerDetailModel.java @@ -4,6 +4,7 @@ import com.ctrip.xpipe.redis.checker.model.KeeperContainerUsedInfoModel; import java.io.Serializable; +import java.util.Date; import java.util.List; import java.util.Objects; @@ -21,24 +22,26 @@ public class MigrationKeeperContainerDetailModel implements Serializable { private boolean keeperPairOverload; + private String srcOverLoadKeeperPairIp; + private String cause; List migrateShards; + private Date updateTime; + public MigrationKeeperContainerDetailModel() { } public MigrationKeeperContainerDetailModel(KeeperContainerUsedInfoModel srcKeeperContainer, KeeperContainerUsedInfoModel targetKeeperContainer, - int migrateKeeperCount, boolean switchActive, boolean keeperPairOverload, String cause, List migrateShards) { this.srcKeeperContainer = srcKeeperContainer; this.targetKeeperContainer = targetKeeperContainer; - this.migrateKeeperCount = migrateKeeperCount; this.switchActive = switchActive; this.keeperPairOverload = keeperPairOverload; this.cause = cause; @@ -48,6 +51,7 @@ public MigrationKeeperContainerDetailModel(KeeperContainerUsedInfoModel srcKeepe public void addReadyToMigrateShard( DcClusterShard shard) { migrateShards.add(shard); migrateKeeperCount++; + this.updateTime = new Date(System.currentTimeMillis() + 8 * 60 * 60 * 1000); } public void migrateShardCompletion(DcClusterShard dcClusterShard) { @@ -91,6 +95,15 @@ public MigrationKeeperContainerDetailModel setMigrateShards(List 
return this; } + public String getSrcOverLoadKeeperPairIp() { + return srcOverLoadKeeperPairIp; + } + + public MigrationKeeperContainerDetailModel setSrcOverLoadKeeperPairIp(String srcOverLoadKeeperPairIp) { + this.srcOverLoadKeeperPairIp = srcOverLoadKeeperPairIp; + return this; + } + public boolean isSwitchActive() { return switchActive; } @@ -123,6 +136,15 @@ public void migrateKeeperCompleteCountIncrease() { this.migrateKeeperCompleteCount++; } + public Date getUpdateTime() { + return updateTime; + } + + public MigrationKeeperContainerDetailModel setUpdateTime(Date updateTime) { + this.updateTime = updateTime; + return this; + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -151,7 +173,11 @@ public String toString() { ", targetKeeperContainer=" + targetKeeperContainer + ", migrateKeeperCount=" + migrateKeeperCount + ", migrateKeeperCompleteCount=" + migrateKeeperCompleteCount + + ", switchActive=" + switchActive + + ", keeperPairOverload=" + keeperPairOverload + + ", cause='" + cause + '\'' + ", migrateShards=" + migrateShards + + ", updateTime=" + updateTime + '}'; } } diff --git a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/service/KeeperContainerAnalyzerService.java b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/service/KeeperContainerAnalyzerService.java index d50fa657a..8f9377260 100644 --- a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/service/KeeperContainerAnalyzerService.java +++ b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/service/KeeperContainerAnalyzerService.java @@ -2,10 +2,11 @@ import com.ctrip.xpipe.redis.checker.model.KeeperContainerUsedInfoModel; +import java.util.List; import java.util.Map; public interface KeeperContainerAnalyzerService { - void initStandard(Map currentDcAllKeeperContainerUsedInfoModelMap); + void initStandard(List currentDcAllKeeperContainerUsedInfoModelMap); } diff --git a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/service/impl/DefaultKeeperContainerAnalyzerService.java b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/service/impl/DefaultKeeperContainerAnalyzerService.java index 9b62edd14..3ba1352c3 100644 --- a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/service/impl/DefaultKeeperContainerAnalyzerService.java +++ b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/service/impl/DefaultKeeperContainerAnalyzerService.java @@ -5,14 +5,8 @@ import com.ctrip.xpipe.redis.checker.model.KeeperContainerUsedInfoModel; import com.ctrip.xpipe.redis.console.config.ConsoleConfig; import com.ctrip.xpipe.redis.console.keeper.entity.KeeperContainerDiskType; -import com.ctrip.xpipe.redis.console.model.ConfigModel; -import com.ctrip.xpipe.redis.console.model.KeeperContainerOverloadStandardModel; -import com.ctrip.xpipe.redis.console.model.KeepercontainerTbl; -import com.ctrip.xpipe.redis.console.model.OrganizationTbl; -import com.ctrip.xpipe.redis.console.service.ConfigService; -import com.ctrip.xpipe.redis.console.service.KeeperContainerService; -import com.ctrip.xpipe.redis.console.service.KeeperContainerAnalyzerService; -import com.ctrip.xpipe.redis.console.service.OrganizationService; +import com.ctrip.xpipe.redis.console.model.*; +import com.ctrip.xpipe.redis.console.service.*; import com.ctrip.xpipe.utils.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -37,6 +31,9 @@ public class DefaultKeeperContainerAnalyzerService implements KeeperContainerAna 
@Autowired private OrganizationService organizationService; + @Autowired + private AzService azService; + private static final long DEFAULT_PEER_DATA_OVERLOAD = 474L * 1024 * 1024 * 1024; private static final long DEFAULT_KEEPER_FLOW_OVERLOAD = 270 * 1024; @@ -46,7 +43,7 @@ public class DefaultKeeperContainerAnalyzerService implements KeeperContainerAna private static final String KEEPER_STANDARD = "keeper_standard"; @Override - public void initStandard(Map currentDcAllKeeperContainerUsedInfoModelMap) { + public void initStandard(List currentDcAllKeeperContainerUsedInfoModelMap) { Map inputFlowStandardMap = new HashMap<>(); Map peerDataStandardMap = new HashMap<>(); List configs = configService.getConfigs(KEY_KEEPER_CONTAINER_STANDARD); @@ -66,7 +63,7 @@ public void initStandard(Map currentDcAllK } CatEventMonitor.DEFAULT.logEvent(KEEPER_STANDARD, inputFlowStandardMap.toString() + inputFlowStandardMap.toString()); KeeperContainerOverloadStandardModel defaultOverloadStandard = getDefaultStandard(inputFlowStandardMap, peerDataStandardMap); - for (KeeperContainerUsedInfoModel infoModel : currentDcAllKeeperContainerUsedInfoModelMap.values()) { + for (KeeperContainerUsedInfoModel infoModel : currentDcAllKeeperContainerUsedInfoModelMap) { KeeperContainerOverloadStandardModel realKeeperContainerOverloadStandard = getRealStandard(inputFlowStandardMap, peerDataStandardMap, defaultOverloadStandard, infoModel); infoModel.setInputFlowStandard(realKeeperContainerOverloadStandard.getFlowOverload()); infoModel.setRedisUsedMemoryStandard(realKeeperContainerOverloadStandard.getPeerDataOverload()); @@ -93,7 +90,11 @@ private KeeperContainerOverloadStandardModel getRealStandard( Map KeepercontainerTbl keepercontainerTbl = keeperContainerService.find(infoModel.getKeeperIp()); infoModel.setDiskType(keepercontainerTbl.getKeepercontainerDiskType()); infoModel.setKeeperContainerActive(keepercontainerTbl.isKeepercontainerActive()); - OrganizationTbl organizationTbl = organizationService.getOrganizationTblByCMSOrganiztionId(keepercontainerTbl.getOrgId()); + AzTbl availableZoneTblById = azService.getAvailableZoneTblById(keepercontainerTbl.getAzId()); + if (availableZoneTblById != null) { + infoModel.setAz(availableZoneTblById.getAzName()); + } + OrganizationTbl organizationTbl = organizationService.getOrganization(keepercontainerTbl.getKeepercontainerOrgId()); if (organizationTbl != null) { infoModel.setOrg(organizationTbl.getOrgName()); } @@ -131,4 +132,8 @@ public void setOrganizationService(OrganizationService organizationService) { this.organizationService = organizationService; } + @VisibleForTesting + public void setAzService(AzService azService) { + this.azService = azService; + } } diff --git a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/spring/ConsoleContextConfig.java b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/spring/ConsoleContextConfig.java index 5edea82c9..2591875a2 100644 --- a/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/spring/ConsoleContextConfig.java +++ b/redis/redis-console/src/main/java/com/ctrip/xpipe/redis/console/spring/ConsoleContextConfig.java @@ -23,6 +23,7 @@ import com.ctrip.xpipe.redis.console.keeper.KeeperContainerUsedInfoAnalyzer; import com.ctrip.xpipe.redis.console.keeper.handler.KeeperContainerFilterChain; import com.ctrip.xpipe.redis.console.keeper.impl.DefaultKeeperContainerUsedInfoAnalyzer; +import com.ctrip.xpipe.redis.console.keeper.impl.KeeperContainerMigrationAnalyzer; import 
com.ctrip.xpipe.redis.console.resources.DefaultMetaCache; import com.ctrip.xpipe.redis.console.resources.DefaultPersistenceCache; import com.ctrip.xpipe.redis.console.service.DcClusterShardService; @@ -126,9 +127,8 @@ public PingService pingService() { @Bean public KeeperContainerUsedInfoAnalyzer KeeperContainerUsedInfoAnalyzer(ConsoleConfig config, - KeeperContainerAnalyzerService keeperContainerAnalyzerService, - KeeperContainerFilterChain keeperContainerFilterChain) { - return new DefaultKeeperContainerUsedInfoAnalyzer(config, keeperContainerAnalyzerService, keeperContainerFilterChain); + KeeperContainerMigrationAnalyzer migrationAnalyzer){ + return new DefaultKeeperContainerUsedInfoAnalyzer(config, migrationAnalyzer); } @Bean diff --git a/redis/redis-console/src/main/resources/static/dist/bundle.js b/redis/redis-console/src/main/resources/static/dist/bundle.js index ab3716a2e..30ce66624 100644 --- a/redis/redis-console/src/main/resources/static/dist/bundle.js +++ b/redis/redis-console/src/main/resources/static/dist/bundle.js @@ -9173,7 +9173,7 @@ eval("angular\n .module('index')\n .controller('KeepercontainerFormCtl', K \**********************************************************/ /***/ (() => { -eval("angular\n .module('index')\n .controller('KeepercontainerOverallCtl', KeepercontainerOverallCtl);\nfunction KeepercontainerOverallCtl($rootScope, $scope, $window, $stateParams, KeeperContainerService, toastr, NgTableParams, $interval) {\n $scope.originData = [];\n $scope.tableParams = new NgTableParams({}, {});\n KeeperContainerService.getAllKeepercontainerUsedInfo().then(function (response) {\n if (Array.isArray(response)) {\n response.forEach(function (keeperContainerInfo) {\n if (keeperContainerInfo.keeperIp === $stateParams.keepercontainerIp) {\n $scope.originData = Object.entries(keeperContainerInfo.detailInfo).map(function (item) {\n var key = item[0].split(':');\n return {\n key: {\n dcName: key[0],\n clusterName: key[1],\n shardName: key[2],\n active: key[3],\n port: key[4]\n },\n value: item[1]\n };\n });\n }\n });\n console.log($scope.originData);\n $scope.tableParams = new NgTableParams({\n page: 1,\n count: 10,\n }, {\n filterDelay: 100,\n counts: [10, 25, 50],\n dataset: $scope.originData\n });\n }\n });\n $scope.getIp = function () {\n return $stateParams.keepercontainerIp;\n };\n $scope.getDc = function () {\n return $scope.originData[0].key.dcName;\n };\n}\n\n\n//# sourceURL=webpack://XPipe-Console/./scripts/controllers/KeepercontainerOverallCtl.ts?"); +eval("angular\n .module('index')\n .controller('KeepercontainerOverallCtl', KeepercontainerOverallCtl);\nfunction KeepercontainerOverallCtl($rootScope, $scope, $window, $stateParams, KeeperContainerService, toastr, NgTableParams, $interval) {\n $scope.originData = [];\n $scope.tableParams = new NgTableParams({}, {});\n KeeperContainerService.getAllKeepercontainerUsedInfo().then(function (response) {\n if (Array.isArray(response)) {\n response.forEach(function (keeperContainerInfo) {\n if (keeperContainerInfo.keeperIp === $stateParams.keepercontainerIp) {\n $scope.updateTime = keeperContainerInfo.updateTime;\n $scope.originData = Object.entries(keeperContainerInfo.detailInfo).map(function (item) {\n var key = item[0].split(':');\n return {\n key: {\n dcName: key[0],\n clusterName: key[1],\n shardName: key[2],\n active: key[3],\n port: key[4]\n },\n value: item[1]\n };\n });\n }\n });\n console.log($scope.originData);\n $scope.tableParams = new NgTableParams({\n page: 1,\n count: 10,\n }, {\n filterDelay: 100,\n counts: [10, 
25, 50],\n dataset: $scope.originData\n });\n }\n });\n $scope.getIpDC = function () {\n return $stateParams.keepercontainerIp;\n };\n $scope.getDc = function () {\n return $scope.originData[0].key.dcName;\n };\n $scope.getTime = function () {\n if ($scope.updateTime == null) {\n return '';\n }\n return $scope.updateTime.substring(0, 19).replace(\"T\", \" \");\n };\n}\n\n\n//# sourceURL=webpack://XPipe-Console/./scripts/controllers/KeepercontainerOverallCtl.ts?"); /***/ }), @@ -9183,7 +9183,7 @@ eval("angular\n .module('index')\n .controller('KeepercontainerOverallCtl' \***********************************************************/ /***/ (() => { -eval("angular\n .module('index')\n .controller('KeepercontainerOverloadCtl', KeepercontainerOverloadCtl);\nKeepercontainerOverloadCtl.$inject = ['$rootScope', '$scope', '$window', '$stateParams', 'KeeperContainerService',\n 'toastr', 'NgTableParams', '$interval'];\nfunction KeepercontainerOverloadCtl($rootScope, $scope, $window, $stateParams, KeeperContainerService, toastr, NgTableParams, $interval) {\n $scope.overloadKeeperContainer = [];\n $scope.tableParams = new NgTableParams({}, {});\n $scope.migratingTableParams = new NgTableParams({}, {});\n $scope.selectAll = false;\n $scope.toggleAll = toggleAll;\n $scope.isChecked = isChecked;\n var OPERATE_TYPE = {\n DETAIL: 'detail',\n MIGRATING: 'migrating',\n STOPPED: 'stopped'\n };\n $scope.operateType = $stateParams.type;\n $scope.migratingKeeperContainers = [];\n $scope.scheduledWork;\n $scope.beginToMigrateOverloadKeeperContainers = beginToMigrateOverloadKeeperContainers;\n KeeperContainerService.getAllOverloadKeepercontainer()\n .then(function (result) {\n if (Array.isArray(result))\n $scope.overloadKeeperContainer = result;\n $scope.overloadKeeperContainer.forEach(function (container) {\n switch (container.cause) {\n case 'BOTH':\n container.cause = '数据量和流量超载';\n break;\n case 'PEER_DATA_OVERLOAD':\n container.cause = '数据量超载';\n break;\n case 'INPUT_FLOW_OVERLOAD':\n container.cause = '流量超载';\n break;\n case 'KEEPER_PAIR_BOTH':\n case 'KEEPER_PAIR_PEER_DATA_OVERLOAD':\n case 'KEEPER_PAIR_INPUT_FLOW_OVERLOAD':\n container.cause = 'keeper对超载';\n break;\n }\n if (!container.switchActive && !container.keeperPairOverload) {\n container.result = '迁移主keeper';\n }\n else if (container.switchActive && !container.keeperPairOverload) {\n container.result = '主备切换';\n }\n else if (!container.switchActive && container.keeperPairOverload) {\n container.result = '迁移备keeper';\n }\n });\n $scope.tableParams = new NgTableParams({\n page: 1,\n count: 10,\n }, {\n filterDelay: 100,\n counts: [10, 25, 50],\n dataset: $scope.overloadKeeperContainer\n });\n });\n function beginToMigrateOverloadKeeperContainers() {\n $scope.migratingKeeperContainers = $scope.overloadKeeperContainer.filter(function (keeperContainer) {\n return keeperContainer.selected;\n });\n $scope.tableParams = new NgTableParams({}, {});\n $scope.migratingTableParams = new NgTableParams({\n page: 1,\n count: 10,\n }, {\n filterDelay: 100,\n counts: [10, 25, 50],\n dataset: $scope.migratingKeeperContainers\n });\n $scope.operateType = OPERATE_TYPE.MIGRATING;\n KeeperContainerService.beginToMigrateOverloadKeeperContainers.apply(KeeperContainerService, $scope.migratingKeeperContainers)\n .then(function (result) {\n if (result.message == 'success') {\n toastr.success(\"迁移成功\");\n }\n else {\n toastr.error(result.message, \"迁移失败\");\n }\n getOverloadKeeperContainerMigrationProcess();\n $interval.cancel($scope.scheduledWork);\n });\n }\n function 
getOverloadKeeperContainerMigrationProcess() {\n if ($scope.operateType == OPERATE_TYPE.MIGRATING) {\n KeeperContainerService.getOverloadKeeperContainerMigrationProcess()\n .then(function (result) {\n if (result == null)\n return;\n $scope.migratingKeeperContainers = result;\n $scope.migratingTableParams = new NgTableParams({\n page: 1,\n count: 10,\n }, {\n filterDelay: 100,\n counts: [10, 25, 50],\n dataset: $scope.migratingKeeperContainers\n });\n });\n }\n }\n $scope.scheduledWork = $interval(getOverloadKeeperContainerMigrationProcess, 1000);\n function toggleAll() {\n $scope.selectAll = !$scope.selectAll;\n $scope.overloadKeeperContainer.forEach(function (keeperContainer) {\n keeperContainer.selected = !keeperContainer.selected;\n });\n }\n function isChecked() {\n return $scope.selectAll;\n }\n}\n\n\n//# sourceURL=webpack://XPipe-Console/./scripts/controllers/KeepercontainerOverloadCtl.ts?"); +eval("angular\n .module('index')\n .controller('KeepercontainerOverloadCtl', KeepercontainerOverloadCtl);\nKeepercontainerOverloadCtl.$inject = ['$rootScope', '$scope', '$window', '$stateParams', 'KeeperContainerService',\n 'toastr', 'NgTableParams', '$interval'];\nfunction KeepercontainerOverloadCtl($rootScope, $scope, $window, $stateParams, KeeperContainerService, toastr, NgTableParams, $interval) {\n $scope.overloadKeeperContainer = [];\n $scope.tableParams = new NgTableParams({}, {});\n $scope.migratingTableParams = new NgTableParams({}, {});\n $scope.selectAll = false;\n $scope.toggleAll = toggleAll;\n $scope.isChecked = isChecked;\n var OPERATE_TYPE = {\n DETAIL: 'detail',\n MIGRATING: 'migrating',\n STOPPED: 'stopped'\n };\n $scope.operateType = $stateParams.type;\n $scope.migratingKeeperContainers = [];\n $scope.scheduledWork;\n $scope.beginToMigrateOverloadKeeperContainers = beginToMigrateOverloadKeeperContainers;\n KeeperContainerService.getAllOverloadKeepercontainer()\n .then(function (result) {\n if (Array.isArray(result))\n $scope.overloadKeeperContainer = result;\n $scope.overloadKeeperContainer.forEach(function (container) {\n switch (container.cause) {\n case 'BOTH':\n container.cause = '数据量和流量超载';\n break;\n case 'PEER_DATA_OVERLOAD':\n container.cause = '数据量超载';\n break;\n case 'INPUT_FLOW_OVERLOAD':\n container.cause = '流量超载';\n break;\n case 'RESOURCE_LACK':\n container.cause = '资源不足';\n break;\n case 'PAIR_RESOURCE_LACK':\n container.cause = '资源不足(keeper对)';\n break;\n case 'KEEPER_PAIR_BOTH':\n case 'KEEPER_PAIR_PEER_DATA_OVERLOAD':\n case 'KEEPER_PAIR_INPUT_FLOW_OVERLOAD':\n container.cause = 'keeper对超载';\n break;\n }\n if (container.cause == '资源不足' || container.cause == '资源不足(keeper对)') {\n container.result = '';\n }\n else if (!container.switchActive && !container.keeperPairOverload) {\n container.result = '迁移主keeper';\n }\n else if (container.switchActive && !container.keeperPairOverload) {\n container.result = '主备切换';\n }\n else if (!container.switchActive && container.keeperPairOverload) {\n container.result = '迁移备keeper';\n }\n if (container.updateTime != null) {\n container.time = container.updateTime.substring(0, 19).replace(\"T\", \" \");\n }\n });\n $scope.tableParams = new NgTableParams({\n page: 1,\n count: 10,\n }, {\n filterDelay: 100,\n counts: [10, 25, 50],\n dataset: $scope.overloadKeeperContainer\n });\n });\n function beginToMigrateOverloadKeeperContainers() {\n $scope.migratingKeeperContainers = $scope.overloadKeeperContainer.filter(function (keeperContainer) {\n return keeperContainer.selected;\n });\n $scope.tableParams = new NgTableParams({}, {});\n 
$scope.migratingTableParams = new NgTableParams({\n page: 1,\n count: 10,\n }, {\n filterDelay: 100,\n counts: [10, 25, 50],\n dataset: $scope.migratingKeeperContainers\n });\n $scope.operateType = OPERATE_TYPE.MIGRATING;\n KeeperContainerService.beginToMigrateOverloadKeeperContainers.apply(KeeperContainerService, $scope.migratingKeeperContainers)\n .then(function (result) {\n if (result.message == 'success') {\n toastr.success(\"迁移成功\");\n }\n else {\n toastr.error(result.message, \"迁移失败\");\n }\n getOverloadKeeperContainerMigrationProcess();\n $interval.cancel($scope.scheduledWork);\n });\n }\n function getOverloadKeeperContainerMigrationProcess() {\n if ($scope.operateType == OPERATE_TYPE.MIGRATING) {\n KeeperContainerService.getOverloadKeeperContainerMigrationProcess()\n .then(function (result) {\n if (result == null)\n return;\n $scope.migratingKeeperContainers = result;\n $scope.migratingTableParams = new NgTableParams({\n page: 1,\n count: 10,\n }, {\n filterDelay: 100,\n counts: [10, 25, 50],\n dataset: $scope.migratingKeeperContainers\n });\n });\n }\n }\n $scope.scheduledWork = $interval(getOverloadKeeperContainerMigrationProcess, 1000);\n function toggleAll() {\n $scope.selectAll = !$scope.selectAll;\n $scope.overloadKeeperContainer.forEach(function (keeperContainer) {\n keeperContainer.selected = !keeperContainer.selected;\n });\n }\n function isChecked() {\n return $scope.selectAll;\n }\n}\n\n\n//# sourceURL=webpack://XPipe-Console/./scripts/controllers/KeepercontainerOverloadCtl.ts?"); /***/ }), @@ -9193,7 +9193,7 @@ eval("angular\n .module('index')\n .controller('KeepercontainerOverloadCtl \***********************************************************/ /***/ (() => { -eval("angular\n .module('index')\n .controller('KeepercontainerUsedInfoCtl', KeepercontainerUsedInfoCtl);\nfunction KeepercontainerUsedInfoCtl($rootScope, $scope, $window, $stateParams, KeeperContainerService, toastr, NgTableParams, $interval) {\n $scope.originData = [];\n $scope.tableParams = new NgTableParams({}, {});\n KeeperContainerService.getAllKeepercontainerUsedInfo().then(function (response) {\n console.log(response);\n if (Array.isArray(response)) {\n $scope.originData = response;\n KeeperContainerService.getAllInfos().then(function (anotherResponse) {\n if (Array.isArray(anotherResponse)) {\n Promise.all([response, anotherResponse]).then(function (responses) {\n var originData = responses[0];\n var anotherData = responses[1];\n originData.forEach(function (row) {\n var matchingData = anotherData.find(function (d) {\n return d.addr.host === row.keeperIp;\n });\n if (matchingData) {\n Object.assign(row, matchingData);\n }\n row.activeRedisUsedMemoryPercentage = $scope.getActivePercentage(row.activeRedisUsedMemory, row.totalRedisUsedMemory) * 100;\n row.activeRedisUsedMemoryStandardPercentage = $scope.getActivePercentage(row.activeRedisUsedMemory, row.redisUsedMemoryStandard) * 100;\n row.activeInputFlowPercentage = $scope.getActivePercentage(row.activeInputFlow, row.totalInputFlow) * 100;\n row.activeInputFlowStandardPercentage = $scope.getActivePercentage(row.activeInputFlow, row.inputFlowStandard) * 100;\n });\n });\n }\n });\n $scope.tableParams = new NgTableParams({\n page: 1,\n count: 10,\n }, {\n filterDelay: 100,\n counts: [10, 25, 50],\n dataset: $scope.originData\n });\n }\n KeeperContainerService.getKeepercontainerFullSynchronizationTime().then(function (response) {\n $scope.fullSynchronizationTime = response.message;\n });\n });\n $scope.getKeepercontainerFullSynchronizationTime = function () 
{\n return $scope.fullSynchronizationTime;\n };\n $scope.getActivePercentage = function (active, total) {\n if (total === 0) {\n return 0;\n }\n return active / total;\n };\n}\n\n\n//# sourceURL=webpack://XPipe-Console/./scripts/controllers/KeepercontainerUsedInfoCtl.ts?"); +eval("angular\n .module('index')\n .controller('KeepercontainerUsedInfoCtl', KeepercontainerUsedInfoCtl);\nfunction KeepercontainerUsedInfoCtl($rootScope, $scope, $window, $stateParams, KeeperContainerService, toastr, NgTableParams, $interval) {\n $scope.originData = [];\n $scope.tableParams = new NgTableParams({}, {});\n KeeperContainerService.getAllKeepercontainerUsedInfo().then(function (response) {\n console.log(response);\n if (Array.isArray(response)) {\n $scope.originData = response;\n KeeperContainerService.getAllInfos().then(function (anotherResponse) {\n if (Array.isArray(anotherResponse)) {\n Promise.all([response, anotherResponse]).then(function (responses) {\n var originData = responses[0];\n var anotherData = responses[1];\n originData.forEach(function (row) {\n var matchingData = anotherData.find(function (d) {\n return d.addr.host === row.keeperIp;\n });\n if (matchingData) {\n Object.assign(row, matchingData);\n }\n row.activeRedisUsedMemoryPercentage = $scope.getActivePercentage(row.activeRedisUsedMemory, row.totalRedisUsedMemory) * 100;\n row.activeRedisUsedMemoryStandardPercentage = $scope.getActivePercentage(row.activeRedisUsedMemory, row.redisUsedMemoryStandard) * 100;\n row.activeInputFlowPercentage = $scope.getActivePercentage(row.activeInputFlow, row.totalInputFlow) * 100;\n row.activeInputFlowStandardPercentage = $scope.getActivePercentage(row.activeInputFlow, row.inputFlowStandard) * 100;\n if ($scope.lastUpdateTime == null || row.updateTime > $scope.lastUpdateTime) {\n $scope.lastUpdateTime = row.updateTime;\n }\n });\n });\n }\n });\n $scope.tableParams = new NgTableParams({\n page: 1,\n count: 10,\n }, {\n filterDelay: 100,\n counts: [10, 25, 50],\n dataset: $scope.originData\n });\n }\n KeeperContainerService.getKeepercontainerFullSynchronizationTime().then(function (response) {\n $scope.fullSynchronizationTime = response.message;\n });\n });\n $scope.getKeepercontainerFullSynchronizationTime = function () {\n return $scope.fullSynchronizationTime;\n };\n $scope.getLastUpdateTime = function () {\n if ($scope.lastUpdateTime == null) {\n return '';\n }\n return $scope.lastUpdateTime.substring(0, 19).replace(\"T\", \" \");\n };\n $scope.getActivePercentage = function (active, total) {\n if (total === 0) {\n return 0;\n }\n return active / total;\n };\n}\n\n\n//# sourceURL=webpack://XPipe-Console/./scripts/controllers/KeepercontainerUsedInfoCtl.ts?"); /***/ }), diff --git a/redis/redis-console/src/main/resources/static/scripts/controllers/KeepercontainerOverallCtl.ts b/redis/redis-console/src/main/resources/static/scripts/controllers/KeepercontainerOverallCtl.ts index 814925f08..db046d2f9 100644 --- a/redis/redis-console/src/main/resources/static/scripts/controllers/KeepercontainerOverallCtl.ts +++ b/redis/redis-console/src/main/resources/static/scripts/controllers/KeepercontainerOverallCtl.ts @@ -11,6 +11,7 @@ function KeepercontainerOverallCtl($rootScope, $scope, $window, $stateParams, Ke if (Array.isArray(response)) { response.forEach(function (keeperContainerInfo) { if (keeperContainerInfo.keeperIp === $stateParams.keepercontainerIp) { + $scope.updateTime = keeperContainerInfo.updateTime; $scope.originData = Object.entries(keeperContainerInfo.detailInfo).map(function(item) { var key = 
item[0].split(':'); return { @@ -38,11 +39,18 @@ function KeepercontainerOverallCtl($rootScope, $scope, $window, $stateParams, Ke } }) - $scope.getIp = function () { + $scope.getIpDC = function () { return $stateParams.keepercontainerIp; } $scope.getDc = function () { return $scope.originData[0].key.dcName; } + + $scope.getTime = function (){ + if ($scope.updateTime == null) { + return ''; + } + return $scope.updateTime.substring(0, 19).replace("T", " "); + } } \ No newline at end of file diff --git a/redis/redis-console/src/main/resources/static/scripts/controllers/KeepercontainerOverloadCtl.ts b/redis/redis-console/src/main/resources/static/scripts/controllers/KeepercontainerOverloadCtl.ts index 3177608f8..af728017b 100644 --- a/redis/redis-console/src/main/resources/static/scripts/controllers/KeepercontainerOverloadCtl.ts +++ b/redis/redis-console/src/main/resources/static/scripts/controllers/KeepercontainerOverloadCtl.ts @@ -39,19 +39,30 @@ function KeepercontainerOverloadCtl($rootScope, $scope, $window, $stateParams, K case 'INPUT_FLOW_OVERLOAD': container.cause = '流量超载'; break; + case 'RESOURCE_LACK': + container.cause = '资源不足'; + break; + case 'PAIR_RESOURCE_LACK': + container.cause = '资源不足(keeper对)'; + break; case 'KEEPER_PAIR_BOTH': case 'KEEPER_PAIR_PEER_DATA_OVERLOAD': case 'KEEPER_PAIR_INPUT_FLOW_OVERLOAD': container.cause = 'keeper对超载'; break; } - if (!container.switchActive && !container.keeperPairOverload) { + if (container.cause == '资源不足' || container.cause == '资源不足(keeper对)') { + container.result = '' + }else if (!container.switchActive && !container.keeperPairOverload) { container.result = '迁移主keeper' } else if (container.switchActive && !container.keeperPairOverload) { container.result = '主备切换' } else if (!container.switchActive && container.keeperPairOverload) { container.result = '迁移备keeper' } + if (container.updateTime != null) { + container.time = container.updateTime.substring(0, 19).replace("T", " "); + } }); $scope.tableParams = new NgTableParams({ page : 1, diff --git a/redis/redis-console/src/main/resources/static/scripts/controllers/KeepercontainerUsedInfoCtl.ts b/redis/redis-console/src/main/resources/static/scripts/controllers/KeepercontainerUsedInfoCtl.ts index 2d23c98f2..c1d4ebce2 100644 --- a/redis/redis-console/src/main/resources/static/scripts/controllers/KeepercontainerUsedInfoCtl.ts +++ b/redis/redis-console/src/main/resources/static/scripts/controllers/KeepercontainerUsedInfoCtl.ts @@ -29,6 +29,9 @@ function KeepercontainerUsedInfoCtl($rootScope, $scope, $window, $stateParams, K row.activeRedisUsedMemoryStandardPercentage = $scope.getActivePercentage(row.activeRedisUsedMemory, row.redisUsedMemoryStandard)*100; row.activeInputFlowPercentage = $scope.getActivePercentage(row.activeInputFlow, row.totalInputFlow)*100; row.activeInputFlowStandardPercentage = $scope.getActivePercentage(row.activeInputFlow, row.inputFlowStandard)*100; + if ($scope.lastUpdateTime == null || row.updateTime > $scope.lastUpdateTime) { + $scope.lastUpdateTime = row.updateTime; + } }); }); } @@ -51,6 +54,13 @@ function KeepercontainerUsedInfoCtl($rootScope, $scope, $window, $stateParams, K return $scope.fullSynchronizationTime; } + $scope.getLastUpdateTime = function () { + if ($scope.lastUpdateTime == null) { + return ''; + } + return $scope.lastUpdateTime.substring(0, 19).replace("T", " "); + } + $scope.getActivePercentage = function (active, total) { if (total === 0) { return 0; diff --git a/redis/redis-console/src/main/resources/static/views/index/keepercontainer_overall.html 
b/redis/redis-console/src/main/resources/static/views/index/keepercontainer_overall.html index 9f73add5b..f42509a44 100644 --- a/redis/redis-console/src/main/resources/static/views/index/keepercontainer_overall.html +++ b/redis/redis-console/src/main/resources/static/views/index/keepercontainer_overall.html @@ -3,7 +3,7 @@
- KeeperContainerOverall:{{getIp()}} {{getDc()}} + KeeperContainerOverall:{{getIpDC()}} {{getDc()}} 更新时间:{{getTime()}}
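The 更新时间 rendered above comes from getTime(), which trims the ISO-8601 string that the serialized Java Date arrives as down to yyyy-MM-dd HH:mm:ss via substring(0, 19).replace("T", " "), and falls back to an empty string when updateTime is missing. A self-contained Java sketch of the same transformation (the sample timestamp is made up):

    public class UpdateTimeFormat {
        // Mirrors updateTime.substring(0, 19).replace("T", " ") from the console controllers.
        static String toDisplay(String isoTimestamp) {
            if (isoTimestamp == null || isoTimestamp.length() < 19) {
                return "";   // the controller returns '' when no update time is present
            }
            return isoTimestamp.substring(0, 19).replace("T", " ");
        }

        public static void main(String[] args) {
            // hypothetical value; the real one is the serialized KeeperContainerUsedInfoModel.updateTime
            System.out.println(toDisplay("2024-05-01T12:34:56.000+0800"));  // 2024-05-01 12:34:56
        }
    }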
diff --git a/redis/redis-console/src/main/resources/static/views/index/keepercontainer_overload.html b/redis/redis-console/src/main/resources/static/views/index/keepercontainer_overload.html index fd5d814dd..d6e059fdc 100644 --- a/redis/redis-console/src/main/resources/static/views/index/keepercontainer_overload.html +++ b/redis/redis-console/src/main/resources/static/views/index/keepercontainer_overload.html @@ -31,7 +31,7 @@

- {{info.srcKeeperContainer.keeperIp}} + {{info.srcOverLoadKeeperPairIp == null ? info.srcKeeperContainer.keeperIp : info.srcKeeperContainer.keeperIp + " <=> (" + info.srcOverLoadKeeperPairIp + ")"}} {{info.srcKeeperContainer.dcName}} {{info.srcKeeperContainer.activeRedisUsedMemory / 1024 | number:2}} {{info.srcKeeperContainer.activeInputFlow}} @@ -41,6 +41,7 @@

{{info.targetKeeperContainer.activeInputFlow}} {{info.cause}} {{info.result}} + {{info.time}}

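The overload table above also has to render the two new causes, RESOURCE_LACK and PAIR_RESOURCE_LACK, which the controller maps to 资源不足 / 资源不足(keeper对) and pairs with an empty result cell, since a resource-lack row has no target container to migrate to. A sketch of that cause-to-label mapping in Java (display strings copied from the controller; the Display record exists only for this sketch):

    public class OverloadCauseLabels {
        record Display(String causeLabel, String resultLabel) {}

        static Display describe(String cause, boolean switchActive, boolean keeperPairOverload) {
            // label mapping, mirroring the switch in KeepercontainerOverloadCtl
            String label = switch (cause) {
                case "BOTH" -> "数据量和流量超载";
                case "PEER_DATA_OVERLOAD" -> "数据量超载";
                case "INPUT_FLOW_OVERLOAD" -> "流量超载";
                case "RESOURCE_LACK" -> "资源不足";
                case "PAIR_RESOURCE_LACK" -> "资源不足(keeper对)";
                case "KEEPER_PAIR_BOTH", "KEEPER_PAIR_PEER_DATA_OVERLOAD",
                     "KEEPER_PAIR_INPUT_FLOW_OVERLOAD" -> "keeper对超载";
                default -> cause;
            };
            String result = "";
            if (label.startsWith("资源不足")) {
                result = "";                      // resource-lack rows are informational only
            } else if (!switchActive && !keeperPairOverload) {
                result = "迁移主keeper";
            } else if (switchActive && !keeperPairOverload) {
                result = "主备切换";
            } else if (!switchActive && keeperPairOverload) {
                result = "迁移备keeper";
            }                                     // the remaining combination stays blank, as in the controller
            return new Display(label, result);
        }

        public static void main(String[] args) {
            System.out.println(describe("RESOURCE_LACK", false, false));
            System.out.println(describe("INPUT_FLOW_OVERLOAD", false, false));
        }
    }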
diff --git a/redis/redis-console/src/main/resources/static/views/index/keepercontainer_usedinfo.html b/redis/redis-console/src/main/resources/static/views/index/keepercontainer_usedinfo.html index 862d0379f..ad37efcdd 100644 --- a/redis/redis-console/src/main/resources/static/views/index/keepercontainer_usedinfo.html +++ b/redis/redis-console/src/main/resources/static/views/index/keepercontainer_usedinfo.html @@ -4,6 +4,7 @@
KeeperContainerUsedInfo 预计全量同步时间:{{getKeepercontainerFullSynchronizationTime()}} 分钟 + 最后更新时间:{{getLastUpdateTime()}}
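最后更新时间 here is the newest updateTime across all keeper-container rows; the controller keeps a running maximum while decorating each row, which works because ISO-8601 strings compare correctly as plain strings. The same reduction over the backing Java Dates, as a small sketch:

    import java.util.*;

    public class LastUpdateTime {
        // Picks the most recent update time from a collection of keeper-container rows,
        // the same reduction the used-info controller performs row by row.
        static Optional<Date> latest(Collection<Date> updateTimes) {
            return updateTimes.stream()
                    .filter(Objects::nonNull)     // rows without an update time are skipped
                    .max(Date::compareTo);
        }

        public static void main(String[] args) {
            List<Date> times = Arrays.asList(
                    new Date(1_700_000_000_000L),
                    new Date(1_700_000_600_000L), // newest, expected winner
                    null);
            System.out.println(latest(times).orElse(null));
        }
    }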
diff --git a/redis/redis-console/src/test/java/com/ctrip/xpipe/redis/console/keeper/impl/DefaultKeeperUsedInfoAnalyzerTest.java b/redis/redis-console/src/test/java/com/ctrip/xpipe/redis/console/keeper/impl/DefaultKeeperUsedInfoAnalyzerTest.java index 9f525748d..38bad77d5 100644 --- a/redis/redis-console/src/test/java/com/ctrip/xpipe/redis/console/keeper/impl/DefaultKeeperUsedInfoAnalyzerTest.java +++ b/redis/redis-console/src/test/java/com/ctrip/xpipe/redis/console/keeper/impl/DefaultKeeperUsedInfoAnalyzerTest.java @@ -6,11 +6,11 @@ import com.ctrip.xpipe.redis.console.config.impl.DefaultConsoleConfig; import com.ctrip.xpipe.redis.console.keeper.handler.KeeperContainerFilterChain; import com.ctrip.xpipe.redis.console.model.*; -import com.ctrip.xpipe.redis.console.service.ConfigService; -import com.ctrip.xpipe.redis.console.service.KeeperContainerAnalyzerService; -import com.ctrip.xpipe.redis.console.service.KeeperContainerService; -import com.ctrip.xpipe.redis.console.service.OrganizationService; +import com.ctrip.xpipe.redis.console.resources.DefaultMetaCache; +import com.ctrip.xpipe.redis.console.service.*; import com.ctrip.xpipe.redis.console.service.impl.DefaultKeeperContainerAnalyzerService; +import com.ctrip.xpipe.redis.core.entity.*; +import com.ctrip.xpipe.redis.core.meta.MetaCache; import com.google.common.collect.Maps; import org.junit.Assert; import org.junit.Before; @@ -40,35 +40,43 @@ public class DefaultKeeperUsedInfoAnalyzerTest { @InjectMocks private DefaultKeeperContainerUsedInfoAnalyzer analyzer; + @InjectMocks + private DefaultKeeperContainerMigrationAnalyzer migrationAnalyzer; @Mock private ConsoleConfig config; @Mock private ThreadPoolExecutor executor; @Mock - private FoundationService service; - @Mock private ConfigService configService; @Mock private KeeperContainerService keeperContainerService; @Mock private OrganizationService organizationService; + @Mock + private AzService azService; + @Mock + private MetaCache metaCache; private final KeeperContainerFilterChain filterChain = new KeeperContainerFilterChain(); public static final int expireTime = 1000; public static final String DC = "jq"; - public static final String IP1 = "1.1.1.1", IP2 = "2.2.2.2", IP3 = "3.3.3.3", IP4 = "4.4.4.4", IP5 = "5.5.5.5"; + public static final String IP1 = "1.1.1.1", IP2 = "2.2.2.2", IP3 = "3.3.3.3", IP4 = "4.4.4.4", IP5 = "5.5.5.5", IP6 = "6.6.6.6"; public static final String Cluster1 = "cluster1", Cluster2 = "cluster2", Cluster3 = "cluster3", Cluster4 = "cluster4", Cluster5 = "cluster5"; public static final String Shard1 = "shard1", Shard2 = "shard2", Shard3 = "shard3"; @Before public void before() { + Mockito.when(metaCache.getXpipeMeta()).thenReturn(new XpipeMeta()); + migrationAnalyzer.setMetaCache(metaCache); analyzer.setExecutors(executor); //Disabling activeKeeper/backupKeeper Switch filterChain.setConfig(config); - analyzer.setKeeperContainerFilterChain(filterChain); + migrationAnalyzer.setFilterChain(filterChain); DefaultKeeperContainerAnalyzerService keeperContainerAnalyzerService = new DefaultKeeperContainerAnalyzerService(); keeperContainerAnalyzerService.setConfigService(configService); keeperContainerAnalyzerService.setKeeperContainerService(keeperContainerService); keeperContainerAnalyzerService.setOrganizationService(organizationService); + Mockito.when(azService.getAvailableZoneTblById(Mockito.anyLong())).thenReturn(new AzTbl().setAzName("")); + keeperContainerAnalyzerService.setAzService(azService); List configModels = new ArrayList<>(); ConfigModel configModel = 
new ConfigModel(); ConfigModel configModel1 = new ConfigModel(); @@ -78,8 +86,7 @@ public void before() { configModels.add(configModel1); Mockito.when(configService.getConfigs(KEY_KEEPER_CONTAINER_STANDARD)).thenReturn(configModels); Mockito.when(keeperContainerService.find(Mockito.anyString())).thenReturn(new KeepercontainerTbl().setKeepercontainerActive(true)); - Mockito.when(organizationService.getOrganizationTblByCMSOrganiztionId(Mockito.anyLong())).thenReturn(new OrganizationTbl().setOrgName("org")); - analyzer.setKeeperContainerAnalyzerService(keeperContainerAnalyzerService); + migrationAnalyzer.setKeeperContainerAnalyzerService(keeperContainerAnalyzerService); Mockito.when(config.getClusterDividedParts()).thenReturn(2); Mockito.when(config.getKeeperCheckerIntervalMilli()).thenReturn(expireTime); Mockito.when(config.getKeeperPairOverLoadFactor()).thenReturn(5.0); @@ -96,6 +103,14 @@ public KeeperContainerUsedInfoModel createKeeperContainer(Map models, String keeperIp, long activeInputFlow, long activeRedisUsedMemory, String az, String org){ + KeeperContainerUsedInfoModel model = new KeeperContainerUsedInfoModel(keeperIp, DC, activeInputFlow, activeRedisUsedMemory); + model.setDiskAvailable(true).setDiskUsed(70).setDiskSize(100); + model.setAz(az).setOrg(org); + models.put(keeperIp, model); + return model; + } + @Test public void testUpdateKeeperContainerUsedInfo() { //To prevent a second updateKeeperContainerUsedInfo() data when expired @@ -166,9 +181,7 @@ public void testGetAllDcReadyToMigrationKeeperContainersWithBoth() { .createKeeper(Cluster1, Shard1, false, 4, 4) .createKeeper(Cluster2, Shard2, false, 6, 6); - analyzer.getCurrentDcKeeperContainerUsedInfoModelsMap().putAll(models); - analyzer.analyzeKeeperContainerUsedInfo(); - List allDcReadyToMigrationKeeperContainers = analyzer.getCurrentDcReadyToMigrationKeeperContainers(); + List allDcReadyToMigrationKeeperContainers = migrationAnalyzer.getMigrationPlans(models); Assert.assertEquals(2, allDcReadyToMigrationKeeperContainers.size()); Assert.assertEquals(IP2, allDcReadyToMigrationKeeperContainers.get(0).getSrcKeeperContainer().getKeeperIp()); Assert.assertEquals(IP4, allDcReadyToMigrationKeeperContainers.get(0).getTargetKeeperContainer().getKeeperIp()); @@ -205,10 +218,7 @@ public void testMultiSrcKeeperSingleTargetWithBoth() { createKeeperContainer(models, IP5, 0, 0); - analyzer.getCurrentDcKeeperContainerUsedInfoModelsMap().putAll(models); - analyzer.analyzeKeeperContainerUsedInfo(); - - List allDcReadyToMigrationKeeperContainers = analyzer.getCurrentDcReadyToMigrationKeeperContainers(); + List allDcReadyToMigrationKeeperContainers = migrationAnalyzer.getMigrationPlans(models); Assert.assertEquals(2, allDcReadyToMigrationKeeperContainers.size()); } @@ -242,10 +252,7 @@ public void testSingleSrcKeeperMultiTargetWithBoth() { .createKeeper(Cluster4, Shard1, false, 5, 5) .createKeeper(Cluster4, Shard2, true, 10, 10); - analyzer.getCurrentDcKeeperContainerUsedInfoModelsMap().putAll(models); - analyzer.analyzeKeeperContainerUsedInfo(); - - List allDcReadyToMigrationKeeperContainers = analyzer.getCurrentDcReadyToMigrationKeeperContainers(); + List allDcReadyToMigrationKeeperContainers = migrationAnalyzer.getMigrationPlans(models); Assert.assertEquals(2, allDcReadyToMigrationKeeperContainers.size()); Assert.assertEquals(IP3, allDcReadyToMigrationKeeperContainers.get(0).getTargetKeeperContainer().getKeeperIp()); Assert.assertEquals(2, allDcReadyToMigrationKeeperContainers.get(0).getMigrateKeeperCount()); @@ -255,31 +262,29 @@ public 
void testSingleSrcKeeperMultiTargetWithBoth() { @Test public void testKeeperResourceLackWithBoth() { - Map models1 = new HashMap<>(); - createKeeperContainer(models1, IP1, 20, 20) + Map models = new HashMap<>(); + createKeeperContainer(models, IP1, 20, 20) .createKeeper(Cluster1, Shard1, true, 10, 10) .createKeeper(Cluster1, Shard2, true, 10, 10) .createKeeper(Cluster2, Shard1, false, 10, 10) .createKeeper(Cluster2, Shard2, false, 10, 10); - createKeeperContainer(models1, IP2, 20, 20) + createKeeperContainer(models, IP2, 20, 20) .createKeeper(Cluster2, Shard1, true, 10, 10) .createKeeper(Cluster2, Shard2, true, 10, 10) .createKeeper(Cluster1, Shard1, false, 10, 10) .createKeeper(Cluster1, Shard2, false, 10, 10);; - createKeeperContainer(models1, IP3, 0, 0); + createKeeperContainer(models, IP3, 0, 0); - analyzer.getCurrentDcKeeperContainerUsedInfoModelsMap().putAll(models1); - analyzer.analyzeKeeperContainerUsedInfo(); - List allDcReadyToMigrationKeeperContainers = analyzer.getCurrentDcReadyToMigrationKeeperContainers(); - Assert.assertEquals(1, allDcReadyToMigrationKeeperContainers.size()); + List allDcReadyToMigrationKeeperContainers = migrationAnalyzer.getMigrationPlans(models); + Assert.assertEquals(1, allDcReadyToMigrationKeeperContainers.stream().filter(m -> !m.getCause().endsWith("RESOURCE_LACK")).count()); } @Test public void testGetAllDcReadyToMigrationKeeperContainersWithPeerDataOverLoad() { filterChain.setConfig(config); - Mockito.when(config.getKeeperPairOverLoadFactor()).thenReturn(2.0); + Mockito.when(config.getKeeperPairOverLoadFactor()).thenReturn(5.0); Map models = new HashMap<>(); createKeeperContainer(models, IP1, 4, 23) .createKeeper(Cluster1, Shard1, true, 1, 6) @@ -309,10 +314,8 @@ public void testGetAllDcReadyToMigrationKeeperContainersWithPeerDataOverLoad() { .createKeeper(Cluster5, Shard2, true, 1, 2) .createKeeper(Cluster5, Shard1, false, 1, 2); - analyzer.getCurrentDcKeeperContainerUsedInfoModelsMap().putAll(models); - analyzer.analyzeKeeperContainerUsedInfo(); - List allDcReadyToMigrationKeeperContainers = analyzer.getCurrentDcReadyToMigrationKeeperContainers(); - Assert.assertEquals(2, allDcReadyToMigrationKeeperContainers.stream().filter(container -> !container.isKeeperPairOverload()).count()); + List allDcReadyToMigrationKeeperContainers = migrationAnalyzer.getMigrationPlans(models); + Assert.assertEquals(4, allDcReadyToMigrationKeeperContainers.stream().filter(container -> !container.isKeeperPairOverload()).count()); } @Test @@ -340,10 +343,7 @@ public void testMultiSrcKeeperSingleTargetWithPeerDataOverLoad() { createKeeperContainer(models, IP3, 0, 0); - analyzer.getCurrentDcKeeperContainerUsedInfoModelsMap().putAll(models); - analyzer.analyzeKeeperContainerUsedInfo(); - - List allDcReadyToMigrationKeeperContainers = analyzer.getCurrentDcReadyToMigrationKeeperContainers(); + List allDcReadyToMigrationKeeperContainers = migrationAnalyzer.getMigrationPlans(models); Assert.assertEquals(2, allDcReadyToMigrationKeeperContainers.stream().filter(container -> !container.isKeeperPairOverload()).count()); } @@ -371,10 +371,7 @@ public void testSingleSrcKeeperMultiTargetWithPeerDataOverLoad() { createKeeperContainer(models, IP3,0,0); createKeeperContainer(models, IP4,0,0); - analyzer.getCurrentDcKeeperContainerUsedInfoModelsMap().putAll(models); - analyzer.analyzeKeeperContainerUsedInfo(); - - List allDcReadyToMigrationKeeperContainers = analyzer.getCurrentDcReadyToMigrationKeeperContainers(); + List allDcReadyToMigrationKeeperContainers = 
migrationAnalyzer.getMigrationPlans(models); Assert.assertEquals(2, allDcReadyToMigrationKeeperContainers.size()); Assert.assertEquals(2, allDcReadyToMigrationKeeperContainers.get(0).getMigrateKeeperCount()); Assert.assertEquals(1, allDcReadyToMigrationKeeperContainers.get(1).getMigrateKeeperCount()); @@ -383,7 +380,7 @@ public void testSingleSrcKeeperMultiTargetWithPeerDataOverLoad() { @Test public void testKeeperResourceLackWithPeerDataOverLoad() { filterChain.setConfig(config); - Mockito.when(config.getKeeperPairOverLoadFactor()).thenReturn(1.0); + Mockito.when(config.getKeeperPairOverLoadFactor()).thenReturn(5.0); Map models = new HashMap<>(); createKeeperContainer(models, IP1, 6, 32) .createKeeper(Cluster1, Shard1, true, 1, 5) @@ -403,9 +400,7 @@ public void testKeeperResourceLackWithPeerDataOverLoad() { createKeeperContainer(models, IP3,0,0); - analyzer.getCurrentDcKeeperContainerUsedInfoModelsMap().putAll(models); - analyzer.analyzeKeeperContainerUsedInfo(); - List allDcReadyToMigrationKeeperContainers = analyzer.getCurrentDcReadyToMigrationKeeperContainers(); + List allDcReadyToMigrationKeeperContainers = migrationAnalyzer.getMigrationPlans(models); Assert.assertEquals(2, allDcReadyToMigrationKeeperContainers.size()); } @@ -444,9 +439,9 @@ public void testGetAllDcReadyToMigrationKeeperContainersWithMixed() { .createKeeper(Cluster5, Shard2, true, 9, 9) .createKeeper(Cluster5, Shard1, false, 9, 9); - analyzer.getCurrentDcKeeperContainerUsedInfoModelsMap().putAll(models); - analyzer.analyzeKeeperContainerUsedInfo(); - List allDcReadyToMigrationKeeperContainers = analyzer.getCurrentDcReadyToMigrationKeeperContainers(); + createKeeperContainer(models, IP5, 0, 0); + + List allDcReadyToMigrationKeeperContainers = migrationAnalyzer.getMigrationPlans(models); Assert.assertEquals(3, allDcReadyToMigrationKeeperContainers.stream().filter(MigrationKeeperContainerDetailModel::isKeeperPairOverload).count()); } @@ -476,17 +471,14 @@ public void testMultiSrcKeeperSingleTargetWithMixed() { createKeeperContainer(models, IP3, 0, 0); - analyzer.getCurrentDcKeeperContainerUsedInfoModelsMap().putAll(models); - analyzer.analyzeKeeperContainerUsedInfo(); - - List allDcReadyToMigrationKeeperContainers = analyzer.getCurrentDcReadyToMigrationKeeperContainers(); + List allDcReadyToMigrationKeeperContainers = migrationAnalyzer.getMigrationPlans(models); Assert.assertEquals(2, allDcReadyToMigrationKeeperContainers.stream().filter(container -> !container.isKeeperPairOverload()).count()); } @Test public void testMultiSrcMultiTargetWithFixed() { filterChain.setConfig(config); - Mockito.when(config.getKeeperPairOverLoadFactor()).thenReturn(1.0); + Mockito.when(config.getKeeperPairOverLoadFactor()).thenReturn(5.0); Map models = new HashMap<>(); createKeeperContainer(models, IP1, 4, 19) .createKeeper(Cluster1, Shard1, true, 1, 8) @@ -512,15 +504,89 @@ public void testMultiSrcMultiTargetWithFixed() { .createKeeper(Cluster5, Shard1, true, 8, 1) .createKeeper(Cluster5, Shard2, false, 8, 1); - createKeeperContainer(models, IP4, 1, 8) - .createKeeper(Cluster5, Shard2, true, 1, 8) + createKeeperContainer(models, IP4, 8, 1) + .createKeeper(Cluster5, Shard2, true, 8, 1) .createKeeper(Cluster5, Shard1, false, 8, 1); - analyzer.getCurrentDcKeeperContainerUsedInfoModelsMap().putAll(models); - analyzer.analyzeKeeperContainerUsedInfo(); - - List allDcReadyToMigrationKeeperContainers = analyzer.getCurrentDcReadyToMigrationKeeperContainers(); + List allDcReadyToMigrationKeeperContainers = migrationAnalyzer.getMigrationPlans(models); 
Assert.assertEquals(2, allDcReadyToMigrationKeeperContainers.stream().filter(container -> !container.isKeeperPairOverload()).count()); } + @Test + public void testKeeperContainerAzAndOrg() { + filterChain.setConfig(config); + Mockito.when(config.getKeeperPairOverLoadFactor()).thenReturn(5.0); + Mockito.when(azService.getAvailableZoneTblById(Mockito.anyLong())).thenReturn(null); + Map models = new HashMap<>(); + createKeeperContainer(models, IP1, 17, 17, "PTJQ-A", "kj") + .createKeeper(Cluster1, Shard1, true, 5, 5) + .createKeeper(Cluster1, Shard2, true, 5, 5) + .createKeeper(Cluster2, Shard1, true, 3, 3) + .createKeeper(Cluster2, Shard2, true, 4, 4); + + createKeeperContainer(models, IP2, 0, 0, "PTJQ-B", "kj") + .createKeeper(Cluster1, Shard1, false, 5, 5) + .createKeeper(Cluster1, Shard2, false, 5, 5); + + createKeeperContainer(models, IP3, 0, 0, "PTJQ-B", "kj") + .createKeeper(Cluster2, Shard1, false, 3, 3) + .createKeeper(Cluster2, Shard2, false, 4, 4); + + createKeeperContainer(models, IP4, 0, 0, "PTJQ-C", "kj"); + createKeeperContainer(models, IP5, 0, 0, "PTJQ-A", "kj"); + createKeeperContainer(models, IP6, 0, 0, "PTJQ-A", "jp"); + List allDcReadyToMigrationKeeperContainers = migrationAnalyzer.getMigrationPlans(models); + Assert.assertEquals(IP5, allDcReadyToMigrationKeeperContainers.get(0).getTargetKeeperContainer().getKeeperIp()); + + } + + @Test + public void testKeeperContainersNoBackupKeeper() { + XpipeMeta meta = new XpipeMeta(); + DcMeta dcMeta = new DcMeta().setId(FoundationService.DEFAULT.getDataCenter().toUpperCase()); + ClusterMeta clusterMeta1 = new ClusterMeta().setId(Cluster2); + ShardMeta shardMeta1 = new ShardMeta().setId(Shard2); + shardMeta1.addKeeper(new KeeperMeta().setIp(IP1)); + shardMeta1.addKeeper(new KeeperMeta().setIp(IP2)); + clusterMeta1.addShard(shardMeta1); + dcMeta.addCluster(clusterMeta1); + ClusterMeta clusterMeta2 = new ClusterMeta().setId(Cluster5); + ShardMeta shardMeta2 = new ShardMeta().setId(Shard1); + shardMeta2.addKeeper(new KeeperMeta().setIp(IP1)); + shardMeta2.addKeeper(new KeeperMeta().setIp(IP3)); + clusterMeta2.addShard(shardMeta2); + dcMeta.addCluster(clusterMeta2); + meta.addDc(dcMeta); + Mockito.when(metaCache.getXpipeMeta()).thenReturn(meta); + filterChain.setConfig(config); + Mockito.when(config.getKeeperPairOverLoadFactor()).thenReturn(5.0); + Map models = new HashMap<>(); + createKeeperContainer(models, IP1, 42, 42) + .createKeeper(Cluster1, Shard1, true, 14, 14) + .createKeeper(Cluster1, Shard2, true, 14, 14) + .createKeeper(Cluster2, Shard1, true, 14, 14) + .createKeeper(Cluster2, Shard2, true, 0, 0) + .createKeeper(Cluster5, Shard1, true, 0, 0); + + createKeeperContainer(models, IP2, 1, 1) + .createKeeper(Cluster1, Shard1, false, 14, 14) + .createKeeper(Cluster1, Shard2, false, 14, 14) + .createKeeper(Cluster2, Shard1, false, 14, 14); + + createKeeperContainer(models, IP3, 1, 1); + + createKeeperContainer(models, IP4, 1, 1) + .createKeeper(Cluster5, Shard2, true, 1, 1) + .createKeeper(Cluster5, Shard3, false, 1, 1); + + createKeeperContainer(models, IP5, 1, 1) + .createKeeper(Cluster5, Shard2, false, 1, 1) + .createKeeper(Cluster5, Shard3, true, 1, 1); + + List allDcReadyToMigrationKeeperContainers = migrationAnalyzer.getMigrationPlans(models); + Assert.assertEquals(2, allDcReadyToMigrationKeeperContainers.size()); + Assert.assertNotEquals(IP3, allDcReadyToMigrationKeeperContainers.get(0).getTargetKeeperContainer().getKeeperIp()); + Assert.assertNotEquals(IP3, 
allDcReadyToMigrationKeeperContainers.get(1).getTargetKeeperContainer().getKeeperIp()); + } + } \ No newline at end of file
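testKeeperContainerAzAndOrg expects the chosen target (IP5) to share the source's az and org, and testKeeperContainersNoBackupKeeper expects containers whose shards lack a backup keeper to be kept out of the candidate pool. A stripped-down sketch of that candidate check, with simplified fields in place of KeeperContainerUsedInfoModel and the disk/capacity checks from KeeperContainerFilterChain left out:

    import java.util.*;

    public class TargetCandidateFilter {
        // Minimal stand-in for the fields the target selection looks at.
        static class Container {
            final String ip; final String az; final String org;
            Container(String ip, String az, String org) { this.ip = ip; this.az = az; this.org = org; }
        }

        // A target qualifies when it matches the source's org and az (when those are set),
        // is not the source's pair keeper, and has not been flagged as a problem container.
        static boolean qualifies(Container src, Container target, String srcPairIp, Set<String> problemIps) {
            return (src.org == null || src.org.equals(target.org))
                    && (src.az == null || src.az.equals(target.az))
                    && !Objects.equals(target.ip, srcPairIp)
                    && !problemIps.contains(target.ip);
        }

        public static void main(String[] args) {
            Container src       = new Container("1.1.1.1", "PTJQ-A", "kj");
            Container sameAzOrg = new Container("5.5.5.5", "PTJQ-A", "kj");
            Container otherOrg  = new Container("6.6.6.6", "PTJQ-A", "jp");
            Set<String> problems = Collections.emptySet();
            System.out.println(qualifies(src, sameAzOrg, "2.2.2.2", problems)); // true
            System.out.println(qualifies(src, otherOrg,  "2.2.2.2", problems)); // false
        }
    }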