Frontend UI for displaying overloaded KeeperContainers
yifuzhou committed Dec 21, 2023
1 parent 9046252 commit feb00ed
Showing 17 changed files with 201 additions and 36 deletions.
@@ -10,13 +10,14 @@
@Service
public class DefaultKeeperContainerService extends AbstractService implements KeeperContainerService {

private static final String HTTP_PREFIX = "http://";
private static final String PATH_GET_KEEPER_DISK_INFO = "/keepers/disk";

private static final String DEFAULT_KEEPER_CONTAINER_PORT = "8080";

@Override
public KeeperDiskInfo getKeeperDiskInfo(String keeperContainerIp) throws RestClientException {
return restTemplate.exchange(keeperContainerIp + ":" + DEFAULT_KEEPER_CONTAINER_PORT + PATH_GET_KEEPER_DISK_INFO,
return restTemplate.exchange(HTTP_PREFIX + keeperContainerIp + ":" + DEFAULT_KEEPER_CONTAINER_PORT + PATH_GET_KEEPER_DISK_INFO,
HttpMethod.GET, null, KeeperDiskInfo.class).getBody();
}
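
With the HTTP_PREFIX change, the disk-info request now targets a fully qualified URL. A minimal sketch of the value passed to restTemplate.exchange (the IP below is a placeholder):

String keeperContainerIp = "10.0.0.1"; // placeholder
String url = "http://" + keeperContainerIp + ":" + "8080" + "/keepers/disk";
// -> "http://10.0.0.1:8080/keepers/disk"; without the scheme prefix the bare
// "ip:port/path" string is not a valid absolute URI for RestTemplate.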

@@ -134,7 +134,9 @@ void reportKeeperContainerInfo() {

}
try {
logger.debug("[get KeeperContainer disk Info] keeperIp: {}", keeperIp);
KeeperDiskInfo keeperDiskInfo = keeperContainerService.getKeeperDiskInfo(keeperIp);
logger.debug("[KeeperContainer disk Info] keeperIp: {} keeperDiskInfo: {}", keeperIp, keeperDiskInfo);
model.setDiskAvailable(keeperDiskInfo.available)
.setDiskSize(keeperDiskInfo.spaceUsageInfo.size)
.setDiskUsed(keeperDiskInfo.spaceUsageInfo.use);
@@ -38,8 +38,6 @@ public class KeeperContainerUsedInfoModel {

private String diskType = "default";

private List<String> overLoadCause = new ArrayList<>();

public KeeperContainerUsedInfoModel() {
}

@@ -68,7 +66,6 @@ public KeeperContainerUsedInfoModel(KeeperContainerUsedInfoModel model, Map.Entr
this.diskSize = model.getDiskSize();
this.diskUsed = model.getDiskUsed();
this.diskType = model.getDiskType();
this.overLoadCause = model.getOverLoadCause();
}

public String getDcName() {
@@ -170,14 +167,6 @@ public void setDiskType(String diskType) {
this.diskType = diskType;
}

public List<String> getOverLoadCause() {
return overLoadCause;
}

public void setOverLoadCause(List<String> overLoadCause) {
this.overLoadCause = overLoadCause;
}

public int getActiveKeeperCount() {
return activeKeeperCount;
}
@@ -240,7 +229,6 @@ public String toString() {
", diskSize=" + diskSize +
", diskUsed=" + diskUsed +
", diskType='" + diskType + '\'' +
", overLoadCause=" + overLoadCause +
'}';
}

@@ -265,6 +253,8 @@ public static class KeeperUsedInfo {

private String keeperIP;

public KeeperUsedInfo() {
}

public KeeperUsedInfo(long peerData, long inputFlow, String keeperIP) {
this.peerData = peerData;
@@ -1,15 +1,30 @@
package com.ctrip.xpipe.redis.checker.resource;

import com.ctrip.xpipe.redis.checker.AbstractCheckerTest;
import com.ctrip.xpipe.redis.checker.controller.result.RetMessage;
import com.ctrip.xpipe.redis.checker.model.KeeperContainerUsedInfoModel;
import com.ctrip.xpipe.redis.core.console.ConsoleCheckerPath;
import com.ctrip.xpipe.redis.core.entity.XpipeMeta;
import com.fasterxml.jackson.core.JsonProcessingException;
import okhttp3.mockwebserver.MockResponse;
import okhttp3.mockwebserver.MockWebServer;
import okhttp3.mockwebserver.RecordedRequest;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.client.RestTemplate;

import java.net.InetAddress;
import java.util.ArrayList;
import java.util.List;

import static org.mockito.ArgumentMatchers.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

/**
* @author lishanglin
@@ -46,6 +61,33 @@ public void testGetXpipeMeta() throws Exception {
Assert.assertEquals("GET", request.getMethod());
}

@Test
public void testReportKeeperContainerInfo() {
// use a Mockito mock so that when()/verify() below work; assumed to be the RestTemplate injected into the service under test
RestTemplate restTemplate = mock(RestTemplate.class);
// build test data
List<KeeperContainerUsedInfoModel> keeperContainerUsedInfoModels = new ArrayList<>();
keeperContainerUsedInfoModels.add(new KeeperContainerUsedInfoModel("1.1.1.1", "dc1", 1234, 1));
keeperContainerUsedInfoModels.add(new KeeperContainerUsedInfoModel("2.2.2.2", "dc2", 5678, 2));

// stub the response returned by RestTemplate.postForEntity()
ResponseEntity<RetMessage> responseEntity = new ResponseEntity<>(HttpStatus.OK);
when(restTemplate.postForEntity(
anyString(), any(), eq(RetMessage.class), anyInt())).thenReturn(responseEntity);

// invoke the method under test
String console = "http://console-url";
int index = 1;
service.reportKeeperContainerInfo(console, keeperContainerUsedInfoModels, index);

// verify that RestTemplate.postForEntity() was called with the expected arguments
verify(restTemplate).postForEntity(
eq(console + ConsoleCheckerPath.PATH_POST_KEEPER_CONTAINER_INFO_RESULT),
eq(keeperContainerUsedInfoModels),
eq(RetMessage.class),
eq(index)
);
}

@Override
protected String getXpipeMetaConfigFile() {
return "dc-meta-test.xml";
@@ -190,4 +190,6 @@ public interface ConsoleConfig extends CoreConfig, CheckerConfig, AlertConfig {
double getKeeperPairOverLoadFactor();

double getKeeperContainerDiskOverLoadFactor();

double getKeeperContainerIoRate();
}
@@ -126,6 +126,7 @@ public class DefaultConsoleConfig extends AbstractCoreConfig implements ConsoleC
private static final String KEY_CONSOLE_KEEPER_CONTAINER_OVERLOAD_STANDARD_FACTOR = "console.keeper.container.overload.standard.factor";
private static final String KEY_CONSOLE_KEEPER_PAIR_OVERLOAD_FACTOR = "console.keeper.container.pair.overload.standard.factor";
private static final String KEY_CONSOLE_KEEPER_CONTAINER_DISK_OVERLOAD_FACTOR = "console.keeper.container.disk.overload.factor";
private static final String KEY_CONSOLE_KEEPER_CONTAINER_IO_RATE = "console.keeper.container.io.rate";
private static final String KEY_CONSOLE_AUTO_MIGRATE_OVERLOAD_KEEPER_CONTAINER_OPEN = "console.auto.migrate.overload.keeper.container.open";
private static final String KEY_CONSOLE_AUTO_MIGRATE_OVERLOAD_KEEPER_CONTAINER_INTERVAL_MILLI = "console.auto.migrate.overload.keeper.container.interval.milli";

@@ -802,4 +803,9 @@ public double getKeeperContainerDiskOverLoadFactor() {
return getFloatProperty(KEY_CONSOLE_KEEPER_CONTAINER_DISK_OVERLOAD_FACTOR, 0.8F);
}

@Override
public double getKeeperContainerIoRate() {
return getFloatProperty(KEY_CONSOLE_KEEPER_CONTAINER_IO_RATE, 500F);
}

}
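
The new IO-rate knob is read through getFloatProperty with a built-in default of 500. A hedged example of overriding it, assuming the usual key=value property mechanism used for the other console.* settings and that the value is in MB/s (the unit is not stated in the diff):

# hypothetical override; 500 is the default returned by getKeeperContainerIoRate()
console.keeper.container.io.rate = 300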
@@ -1,9 +1,11 @@
package com.ctrip.xpipe.redis.console.controller.consoleportal;

import com.ctrip.xpipe.redis.checker.controller.result.RetMessage;
import com.ctrip.xpipe.redis.checker.model.DcClusterShard;
import com.ctrip.xpipe.redis.checker.model.DcClusterShardActive;
import com.ctrip.xpipe.redis.checker.model.KeeperContainerUsedInfoModel;
import com.ctrip.xpipe.redis.console.controller.AbstractConsoleController;
import com.ctrip.xpipe.redis.console.keeper.KeeperContainerOverloadCause;
import com.ctrip.xpipe.redis.console.keeper.KeeperContainerUsedInfoAnalyzer;
import com.ctrip.xpipe.redis.console.model.KeeperContainerInfoModel;
import com.ctrip.xpipe.redis.console.model.MigrationKeeperContainerDetailModel;
@@ -12,6 +14,7 @@
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@@ -102,4 +105,11 @@ public RetMessage beginToMigrateOverloadKeeperContainers(@RequestBody List<Migra
public List<KeeperContainerUsedInfoModel> getLastedAllReadyMigrateKeeperContainers() {
return analyzer.getAllKeeperContainerUsedInfoModelsList();
}

@RequestMapping(value = "/keepercontainer/max/fullSynchronizationTime", method = RequestMethod.GET)
public int getMaxKeeperContainerFullSynchronizationTime() {
return analyzer.getMaxKeeperContainerFullSynchronizationTime();
}

}
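
A sketch of calling the new endpoint from a client; the controller's class-level path prefix is not shown in this diff, so the base URL below is a placeholder:

RestTemplate restTemplate = new RestTemplate();
Integer minutes = restTemplate.getForObject(
        "http://console-host:8080/console/keepercontainer/max/fullSynchronizationTime", // placeholder host and prefix
        Integer.class);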
@@ -17,4 +17,6 @@ public interface KeeperContainerUsedInfoAnalyzer {
Map<Integer, List<KeeperContainerUsedInfoModel>> getAllKeeperContainerUsedInfoModels();

List<KeeperContainerUsedInfoModel> getAllKeeperContainerUsedInfoModelsList();

int getMaxKeeperContainerFullSynchronizationTime();
}
@@ -59,6 +59,8 @@ public class DefaultKeeperContainerUsedInfoAnalyzer extends AbstractService impl

private List<MigrationKeeperContainerDetailModel> allDcKeeperContainerDetailModel = new ArrayList<>();

private long maxKeeperContainerActiveRedisUsedMemory;

Map<IPPair, IPPairData> keeperPairUsedInfoMap = new HashMap<>();

Map<String, Map<String ,Map<DcClusterShardActive, KeeperUsedInfo>>> ipPairMap = new HashMap<>();
@@ -112,6 +114,12 @@ public List<KeeperContainerUsedInfoModel> getAllKeeperContainerUsedInfoModelsLis
return allKeeperContainerUsedInfoModelsList;
}

@Override
public int getMaxKeeperContainerFullSynchronizationTime() {
double keeperContainerIoRate = config.getKeeperContainerIoRate();
// bytes -> MB, then divided by the IO rate (assumed MB/s) -> seconds, /60 -> minutes
return (int) (maxKeeperContainerActiveRedisUsedMemory / 1024 / 1024 / keeperContainerIoRate / 60);
}
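
As a rough worked example of the estimate above, assuming maxKeeperContainerActiveRedisUsedMemory is in bytes and the IO rate is in MB/s (both assumptions, since the diff does not state units):

long maxUsedMemoryBytes = 256L * 1024 * 1024 * 1024; // e.g. 256 GB on the busiest container
double ioRateMBps = 500;                             // default from getKeeperContainerIoRate()
int minutes = (int) (maxUsedMemoryBytes / 1024 / 1024 / ioRateMBps / 60); // ≈ 8 minutes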

@Override
public synchronized void updateKeeperContainerUsedInfo(int index, List<KeeperContainerUsedInfoModel> keeperContainerUsedInfoModels) {
if (keeperContainerUsedInfoModels != null && !keeperContainerUsedInfoModels.isEmpty()){
@@ -198,8 +206,12 @@ void analyzeKeeperContainerUsedInfo() {
private Map<DcClusterShardActive, KeeperUsedInfo> analyzeKeeperPair(){
keeperPairUsedInfoMap.clear();
ipPairMap.clear();
maxKeeperContainerActiveRedisUsedMemory = 0;
Map<DcClusterShardActive, KeeperUsedInfo> allDetailInfo = new HashMap<>();
for (KeeperContainerUsedInfoModel infoModel : allKeeperContainerUsedInfoModelsList) {
if (infoModel.getActiveRedisUsedMemory() > maxKeeperContainerActiveRedisUsedMemory) {
maxKeeperContainerActiveRedisUsedMemory = infoModel.getActiveRedisUsedMemory();
}
if (infoModel.getDetailInfo() != null) {
allDetailInfo.putAll(infoModel.getDetailInfo());
}
@@ -311,14 +323,13 @@ private List<MigrationKeeperContainerDetailModel> getOverloadKeeperMigrationDeta
long overloadPeerData = src.getActiveRedisUsedMemory() - srcStandard.getPeerDataOverload();
KeeperContainerOverloadCause overloadCause = getKeeperContainerOverloadCause(overloadInputFlow, overloadPeerData);
if (overloadCause == null) return null;
src.getOverLoadCause().add(overloadCause.name());

switch (overloadCause) {
case PEER_DATA_OVERLOAD:
return getOverloadKeeperMigrationDetails(src, true, overloadPeerData, srcStandard, standardMap, keeperUsedInfoMap, allDetailInfo, minPeerDataKeeperContainers);
return getOverloadKeeperMigrationDetails(src, true, overloadPeerData, srcStandard, standardMap, keeperUsedInfoMap, allDetailInfo, minPeerDataKeeperContainers, overloadCause);
case INPUT_FLOW_OVERLOAD:
case BOTH:
return getOverloadKeeperMigrationDetails(src, false, overloadInputFlow, srcStandard, standardMap, keeperUsedInfoMap, allDetailInfo, minInputFlowKeeperContainers);
return getOverloadKeeperMigrationDetails(src, false, overloadInputFlow, srcStandard, standardMap, keeperUsedInfoMap, allDetailInfo, minInputFlowKeeperContainers, overloadCause);
default:
logger.warn("invalid keeper container overload cause {}", overloadCause);
return null;
@@ -331,7 +342,8 @@ private List<MigrationKeeperContainerDetailModel> getOverloadKeeperMigrationDeta
Map<String, KeeperContainerOverloadStandardModel> standardMap,
Map<String, KeeperContainerUsedInfoModel> keeperUsedInfoMap,
Map<DcClusterShardActive, KeeperUsedInfo> allDetailInfo,
PriorityQueue<KeeperContainerUsedInfoModel> availableKeeperContainers) {
PriorityQueue<KeeperContainerUsedInfoModel> availableKeeperContainers,
KeeperContainerOverloadCause overloadCause) {
List<Map.Entry<DcClusterShardActive, KeeperUsedInfo>> allDcClusterShards;
if (isPeerDataOverload) {
allDcClusterShards = src.getDetailInfo().entrySet().stream()
@@ -357,7 +369,7 @@ private List<MigrationKeeperContainerDetailModel> getOverloadKeeperMigrationDeta
if (keeperContainerFilterChain.doKeeperContainerFilter(backUpKeeperUsedInfoModel, backUpKeeperStandard) &&
keeperContainerFilterChain.doKeeperFilter(dcClusterShard, backUpKeeperUsedInfoModel, srcStandard, backUpKeeperStandard, keeperPairUsedInfoMap)) {
if (switchActiveDetail == null) {
switchActiveDetail = new MigrationKeeperContainerDetailModel(src, null, 0, true, false, new ArrayList<>());
switchActiveDetail = new MigrationKeeperContainerDetailModel(src, null, 0, true, false, overloadCause.name(), new ArrayList<>());
}
switchActiveDetail.addReadyToMigrateShard(dcClusterShard.getKey());
if (target == null || !backUpKeeperUsedInfoModel.getKeeperIp().equals(target.getKeeperIp())) {
@@ -389,7 +401,7 @@ private List<MigrationKeeperContainerDetailModel> getOverloadKeeperMigrationDeta
if (switchActiveDetail != null) result.add(switchActiveDetail);
return result;
}
keeperContainerDetailModel = new MigrationKeeperContainerDetailModel(src, target, 0, false, false, new ArrayList<>());
keeperContainerDetailModel = new MigrationKeeperContainerDetailModel(src, target, 0, false, false, overloadCause.name(), new ArrayList<>());
}
if (!keeperContainerFilterChain.doKeeperFilter(dcClusterShard, target, srcStandard, standardMap.get(target.getKeeperIp()), keeperPairUsedInfoMap)) {
target = null;
@@ -466,15 +478,13 @@ private List<MigrationKeeperContainerDetailModel> getKeeperPairMigrationKeeperDe
long overloadPeerData = longLongPair.getPeerData() - minStandardModel.getPeerDataOverload();
KeeperContainerOverloadCause overloadCause = getKeeperPairOverloadCause(overloadInputFlow, overloadPeerData);
if (overloadCause == null) return null;
pairA.getOverLoadCause().add(overloadCause.name());
pairB.getOverLoadCause().add(overloadCause.name());

switch (overloadCause) {
case KEEPER_PAIR_PEER_DATA_OVERLOAD:
return getKeeperPairMigrationDetails(pairA, pairB, true, overloadPeerData, standardMap, minPeerDataKeeperContainers);
return getKeeperPairMigrationDetails(pairA, pairB, true, overloadPeerData, standardMap, minPeerDataKeeperContainers, overloadCause);
case KEEPER_PAIR_INPUT_FLOW_OVERLOAD:
case KEEPER_PAIR_BOTH:
return getKeeperPairMigrationDetails(pairA, pairB, false, overloadInputFlow, standardMap, minInputFlowKeeperContainers);
return getKeeperPairMigrationDetails(pairA, pairB, false, overloadInputFlow, standardMap, minInputFlowKeeperContainers, overloadCause);
default:
logger.warn("invalid keeper container overload cause {}", overloadCause);
return null;
@@ -487,7 +497,8 @@ private List<MigrationKeeperContainerDetailModel> getKeeperPairMigrationDetails(
boolean isPeerDataOverload,
long overloadData,
Map<String, KeeperContainerOverloadStandardModel> standardMap,
PriorityQueue<KeeperContainerUsedInfoModel> availableKeeperContainers){
PriorityQueue<KeeperContainerUsedInfoModel> availableKeeperContainers,
KeeperContainerOverloadCause overloadCause){
List<MigrationKeeperContainerDetailModel> result = new ArrayList<>();
if (keeperPairUsedInfoMap.get(new IPPair(pairA.getKeeperIp(), pairB.getKeeperIp())).getNumber() == 1) return result;
List<Map.Entry<DcClusterShardActive, KeeperUsedInfo>> allDcClusterShards;
Expand Down Expand Up @@ -524,7 +535,7 @@ private List<MigrationKeeperContainerDetailModel> getKeeperPairMigrationDetails(
usedTarget.add(target);
target = availableKeeperContainers.poll();
}
keeperContainerDetailModel = new MigrationKeeperContainerDetailModel(srcKeeperContainer, target, 0, false, true, new ArrayList<>());
keeperContainerDetailModel = new MigrationKeeperContainerDetailModel(srcKeeperContainer, target, 0, false, true, overloadCause.name(), new ArrayList<>());
}
if (!keeperContainerFilterChain.doKeeperPairFilter(dcClusterShard, target, standardMap.get(srcKeeperIp), standardMap.get(target.getKeeperIp()), keeperPairUsedInfoMap)) {
usedTarget.add(target);
@@ -21,6 +21,8 @@ public class MigrationKeeperContainerDetailModel implements Serializable {

private boolean keeperPairOverload;

private String cause;

List<DcClusterShard> migrateShards;

public MigrationKeeperContainerDetailModel() {
@@ -32,12 +34,14 @@ public MigrationKeeperContainerDetailModel(KeeperContainerUsedInfoModel srcKeepe
int migrateKeeperCount,
boolean switchActive,
boolean keeperPairOverload,
String cause,
List<DcClusterShard> migrateShards) {
this.srcKeeperContainer = srcKeeperContainer;
this.targetKeeperContainer = targetKeeperContainer;
this.migrateKeeperCount = migrateKeeperCount;
this.switchActive = switchActive;
this.keeperPairOverload = keeperPairOverload;
this.cause = cause;
this.migrateShards = migrateShards;
}

@@ -103,6 +107,14 @@ public void setKeeperPairOverload(boolean keeperPairOverload) {
this.keeperPairOverload = keeperPairOverload;
}

public String getCause() {
return cause;
}

public void setCause(String cause) {
this.cause = cause;
}

public void migrateKeeperCountIncrease() {
this.migrateKeeperCount++;
}
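
For reference, a minimal sketch of building the detail model with the new cause field, mirroring the call sites in DefaultKeeperContainerUsedInfoAnalyzer (src and target below are placeholder instances):

KeeperContainerUsedInfoModel src = new KeeperContainerUsedInfoModel();    // placeholder
KeeperContainerUsedInfoModel target = new KeeperContainerUsedInfoModel(); // placeholder
MigrationKeeperContainerDetailModel detail = new MigrationKeeperContainerDetailModel(
        src, target, 0, false, false,
        KeeperContainerOverloadCause.INPUT_FLOW_OVERLOAD.name(), new ArrayList<>());
String cause = detail.getCause(); // "INPUT_FLOW_OVERLOAD"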
