From 8cac7d4009ac6fd5639446cfea33f7a3a0a65a28 Mon Sep 17 00:00:00 2001 From: Nisarg Thakkar Date: Sat, 27 Jul 2024 11:00:11 -0700 Subject: [PATCH 1/2] [controller] Harden update-store workflow --- .../venice/hadoop/VenicePushJobTest.java | 1 - .../java/com/linkedin/venice/ConfigKeys.java | 10 +- .../helix/StoragePersonaRepository.java | 6 +- .../linkedin/venice/meta/BackupStrategy.java | 6 +- .../venice/meta/HybridStoreConfig.java | 5 + .../venice/meta/PartitionerConfigImpl.java | 5 +- .../com/linkedin/venice/meta/ZKStore.java | 10 +- .../linkedin/venice/utils/PartitionUtils.java | 8 +- .../venice/meta/BackupStrategyTest.java | 23 + .../venice/meta/TestHybridStoreConfig.java | 22 + ...LevelConfigForActiveActiveReplication.java | 36 + ...lusterLevelConfigForNativeReplication.java | 6 +- ...stParentControllerWithMultiDataCenter.java | 6 +- ...VeniceHelixAdminWithSharedEnvironment.java | 91 +- .../VeniceParentHelixAdminTest.java | 98 +- .../venice/endToEnd/PartialUpdateTest.java | 3 + .../venice/endToEnd/PushJobDetailsTest.java | 10 +- .../PushStatusStoreMultiColoTest.java | 2 +- .../linkedin/venice/endToEnd/TestBatch.java | 73 +- ...JobWithEmergencySourceRegionSelection.java | 5 +- .../TestPushJobWithNativeReplication.java | 13 +- .../venice/endToEnd/TestStoreMigration.java | 33 +- .../TestStoreUpdateStoragePersona.java | 15 +- .../endToEnd/TestWritePathComputation.java | 44 +- .../venice/hadoop/TestVenicePushJob.java | 40 +- .../IngestionHeartBeatTest.java | 111 +- .../utils/VeniceControllerCreateOptions.java | 16 + .../utils/VeniceControllerWrapper.java | 10 +- .../VeniceMultiClusterCreateOptions.java | 16 + ...VeniceMultiRegionClusterCreateOptions.java | 16 + ...woLayerMultiRegionMultiClusterWrapper.java | 4 +- .../utils/IntegrationTestPushUtils.java | 30 +- .../com/linkedin/venice/utils/TestUtils.java | 4 +- .../com/linkedin/venice/controller/Admin.java | 37 +- .../venice/controller/StoreViewUtils.java | 29 +- .../VeniceControllerClusterConfig.java | 13 +- .../controller/VeniceControllerService.java | 1 + .../venice/controller/VeniceHelixAdmin.java | 1082 ++----------- .../controller/VeniceParentHelixAdmin.java | 1377 ++++++----------- .../SystemSchemaInitializationRoutine.java | 42 +- .../init/SystemStoreInitializationHelper.java | 23 +- .../kafka/consumer/AdminConsumptionTask.java | 2 + .../kafka/consumer/AdminExecutionTask.java | 2 - ...SupersetSchemaGeneratorWithCustomProp.java | 29 +- .../venice/controller/util/AdminUtils.java | 113 ++ .../ParentControllerConfigUpdateUtils.java | 171 -- .../PrimaryControllerConfigUpdateUtils.java | 170 ++ .../controller/util/UpdateStoreUtils.java | 1293 ++++++++++++++++ .../controller/util/UpdateStoreWrapper.java | 18 + .../AbstractTestVeniceParentHelixAdmin.java | 11 +- .../TestVeniceHelixAdminWithoutCluster.java | 56 - .../TestVeniceParentHelixAdmin.java | 246 ++- .../SystemStoreInitializationHelperTest.java | 3 + ...SupersetSchemaGeneratorWithCustomProp.java | 37 + .../controller/util/AdminUtilsTest.java | 167 ++ ...rimaryControllerConfigUpdateUtilsTest.java | 154 ++ .../controller/util/UpdateStoreUtilsTest.java | 1040 +++++++++++++ ...ParentControllerConfigUpdateUtilsTest.java | 308 ---- .../resources/superset_schema_test/v5.avsc | 12 + .../resources/superset_schema_test/v6.avsc | 9 + 60 files changed, 4304 insertions(+), 2919 deletions(-) create mode 100644 internal/venice-common/src/test/java/com/linkedin/venice/meta/BackupStrategyTest.java create mode 100644 
services/venice-controller/src/main/java/com/linkedin/venice/controller/util/AdminUtils.java delete mode 100644 services/venice-controller/src/main/java/com/linkedin/venice/controller/util/ParentControllerConfigUpdateUtils.java create mode 100644 services/venice-controller/src/main/java/com/linkedin/venice/controller/util/PrimaryControllerConfigUpdateUtils.java create mode 100644 services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreUtils.java create mode 100644 services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreWrapper.java create mode 100644 services/venice-controller/src/test/java/com/linkedin/venice/controller/util/AdminUtilsTest.java create mode 100644 services/venice-controller/src/test/java/com/linkedin/venice/controller/util/PrimaryControllerConfigUpdateUtilsTest.java create mode 100644 services/venice-controller/src/test/java/com/linkedin/venice/controller/util/UpdateStoreUtilsTest.java delete mode 100644 services/venice-controller/src/test/java/com/linkedin/venice/controller/utils/ParentControllerConfigUpdateUtilsTest.java create mode 100644 services/venice-controller/src/test/resources/superset_schema_test/v5.avsc create mode 100644 services/venice-controller/src/test/resources/superset_schema_test/v6.avsc diff --git a/clients/venice-push-job/src/test/java/com/linkedin/venice/hadoop/VenicePushJobTest.java b/clients/venice-push-job/src/test/java/com/linkedin/venice/hadoop/VenicePushJobTest.java index c5161eb82a..78439d3742 100644 --- a/clients/venice-push-job/src/test/java/com/linkedin/venice/hadoop/VenicePushJobTest.java +++ b/clients/venice-push-job/src/test/java/com/linkedin/venice/hadoop/VenicePushJobTest.java @@ -440,7 +440,6 @@ private StoreInfo getStoreInfo(Consumer info, boolean applyFirst) { storeInfo.setChunkingEnabled(false); storeInfo.setCompressionStrategy(CompressionStrategy.NO_OP); storeInfo.setWriteComputationEnabled(false); - storeInfo.setIncrementalPushEnabled(false); storeInfo.setNativeReplicationSourceFabric("dc-0"); Map coloMaps = new HashMap() { { diff --git a/internal/venice-common/src/main/java/com/linkedin/venice/ConfigKeys.java b/internal/venice-common/src/main/java/com/linkedin/venice/ConfigKeys.java index 231c7eb482..b72b253233 100644 --- a/internal/venice-common/src/main/java/com/linkedin/venice/ConfigKeys.java +++ b/internal/venice-common/src/main/java/com/linkedin/venice/ConfigKeys.java @@ -375,8 +375,16 @@ private ConfigKeys() { "controller.store.graveyard.cleanup.sleep.interval.between.list.fetch.minutes"; /** - * Whether the superset schema generation in Parent Controller should be done via passed callback or not. + * Whether the superset schema generation in Primary Controller should be done via passed callback or not. */ + public static final String CONTROLLER_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED = + "controller.external.superset.schema.generation.enabled"; + + /** + * Whether the superset schema generation in Primary Controller should be done via passed callback or not. 
+ * @deprecated Use {@link #CONTROLLER_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED} + */ + @Deprecated public static final String CONTROLLER_PARENT_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED = "controller.parent.external.superset.schema.generation.enabled"; diff --git a/internal/venice-common/src/main/java/com/linkedin/venice/helix/StoragePersonaRepository.java b/internal/venice-common/src/main/java/com/linkedin/venice/helix/StoragePersonaRepository.java index cde1c6a5ed..42f235bd9e 100644 --- a/internal/venice-common/src/main/java/com/linkedin/venice/helix/StoragePersonaRepository.java +++ b/internal/venice-common/src/main/java/com/linkedin/venice/helix/StoragePersonaRepository.java @@ -179,15 +179,15 @@ private void deleteStores(List storeNames) { public StoragePersona getPersonaContainingStore(String storeName) { String personaName = storeNamePersonaMap.get(storeName); - if (personaName == null) + if (personaName == null) { return null; + } return getPersona(personaName); } private boolean isStoreSetValid(StoragePersona persona, Optional additionalStore) { Set setToValidate = new HashSet<>(); - if (additionalStore.isPresent()) - setToValidate.add(additionalStore.get().getName()); + additionalStore.ifPresent(store -> setToValidate.add(store.getName())); setToValidate.addAll(persona.getStoresToEnforce()); return setToValidate.stream() .allMatch( diff --git a/internal/venice-common/src/main/java/com/linkedin/venice/meta/BackupStrategy.java b/internal/venice-common/src/main/java/com/linkedin/venice/meta/BackupStrategy.java index c1ea17b8be..d6422747d6 100644 --- a/internal/venice-common/src/main/java/com/linkedin/venice/meta/BackupStrategy.java +++ b/internal/venice-common/src/main/java/com/linkedin/venice/meta/BackupStrategy.java @@ -24,7 +24,7 @@ public enum BackupStrategy { // KEEP_IN_KAFKA_ONLY, /** Keep in user-specified store eg HDD, other DB */ // KEEP_IN_USER_STORE; - private int value; + private final int value; BackupStrategy(int v) { this.value = v; @@ -35,6 +35,10 @@ public enum BackupStrategy { Arrays.stream(values()).forEach(s -> idMapping.put(s.value, s)); } + public int getValue() { + return value; + } + public static BackupStrategy fromInt(int i) { BackupStrategy strategy = idMapping.get(i); if (strategy == null) { diff --git a/internal/venice-common/src/main/java/com/linkedin/venice/meta/HybridStoreConfig.java b/internal/venice-common/src/main/java/com/linkedin/venice/meta/HybridStoreConfig.java index 061098317f..cbceaff05c 100644 --- a/internal/venice-common/src/main/java/com/linkedin/venice/meta/HybridStoreConfig.java +++ b/internal/venice-common/src/main/java/com/linkedin/venice/meta/HybridStoreConfig.java @@ -27,4 +27,9 @@ public interface HybridStoreConfig extends DataModelBackedStructure= 0 + && (getOffsetLagThresholdToGoOnline() >= 0 || getProducerTimestampLagThresholdToGoOnlineInSeconds() >= 0); + } } diff --git a/internal/venice-common/src/main/java/com/linkedin/venice/meta/PartitionerConfigImpl.java b/internal/venice-common/src/main/java/com/linkedin/venice/meta/PartitionerConfigImpl.java index ccd727d7f2..00692908aa 100644 --- a/internal/venice-common/src/main/java/com/linkedin/venice/meta/PartitionerConfigImpl.java +++ b/internal/venice-common/src/main/java/com/linkedin/venice/meta/PartitionerConfigImpl.java @@ -92,6 +92,9 @@ public int hashCode() { @JsonIgnore public PartitionerConfig clone() { - return new PartitionerConfigImpl(getPartitionerClass(), getPartitionerParams(), getAmplificationFactor()); + return new PartitionerConfigImpl( + 
getPartitionerClass(), + new HashMap<>(getPartitionerParams()), + getAmplificationFactor()); } } diff --git a/internal/venice-common/src/main/java/com/linkedin/venice/meta/ZKStore.java b/internal/venice-common/src/main/java/com/linkedin/venice/meta/ZKStore.java index 48c4fb810f..7494d2fd6b 100644 --- a/internal/venice-common/src/main/java/com/linkedin/venice/meta/ZKStore.java +++ b/internal/venice-common/src/main/java/com/linkedin/venice/meta/ZKStore.java @@ -209,7 +209,7 @@ public ZKStore(Store store) { setSchemaAutoRegisterFromPushJobEnabled(store.isSchemaAutoRegisterFromPushJobEnabled()); setLatestSuperSetValueSchemaId(store.getLatestSuperSetValueSchemaId()); setHybridStoreDiskQuotaEnabled(store.isHybridStoreDiskQuotaEnabled()); - setEtlStoreConfig(store.getEtlStoreConfig()); + setEtlStoreConfig(store.getEtlStoreConfig().clone()); setStoreMetadataSystemStoreEnabled(store.isStoreMetadataSystemStoreEnabled()); setLatestVersionPromoteToCurrentTimestamp(store.getLatestVersionPromoteToCurrentTimestamp()); setBackupVersionRetentionMs(store.getBackupVersionRetentionMs()); @@ -220,7 +220,7 @@ public ZKStore(Store store) { setStoreMetaSystemStoreEnabled(store.isStoreMetaSystemStoreEnabled()); setActiveActiveReplicationEnabled(store.isActiveActiveReplicationEnabled()); setRmdVersion(store.getRmdVersion()); - setViewConfigs(store.getViewConfigs()); + setViewConfigs(new HashMap<>(store.getViewConfigs())); setStorageNodeReadQuotaEnabled(store.isStorageNodeReadQuotaEnabled()); setUnusedSchemaDeletionEnabled(store.isUnusedSchemaDeletionEnabled()); setMinCompactionLagSeconds(store.getMinCompactionLagSeconds()); @@ -365,11 +365,7 @@ public void setLargestUsedVersionNumber(int largestUsedVersionNumber) { @SuppressWarnings("unused") // Used by Serializer/De-serializer for storing to Zoo Keeper @Override public long getStorageQuotaInByte() { - // This is a safeguard in case that some old stores do not have storage quota field - return (this.storeProperties.storageQuotaInByte <= 0 - && this.storeProperties.storageQuotaInByte != UNLIMITED_STORAGE_QUOTA) - ? DEFAULT_STORAGE_QUOTA - : this.storeProperties.storageQuotaInByte; + return this.storeProperties.storageQuotaInByte; } @SuppressWarnings("unused") // Used by Serializer/De-serializer for storing to Zoo Keeper diff --git a/internal/venice-common/src/main/java/com/linkedin/venice/utils/PartitionUtils.java b/internal/venice-common/src/main/java/com/linkedin/venice/utils/PartitionUtils.java index 4ecf0f4456..fb84adef7e 100644 --- a/internal/venice-common/src/main/java/com/linkedin/venice/utils/PartitionUtils.java +++ b/internal/venice-common/src/main/java/com/linkedin/venice/utils/PartitionUtils.java @@ -63,12 +63,16 @@ public static int calculatePartitionCount( } else if (partitionCount < minPartitionCount) { partitionCount = minPartitionCount; } + + int returnPartitionCount = partitionCount <= 0 ? 
1 : (int) partitionCount; + LOGGER.info( "Assign partition count: {} calculated by storage quota: {} to the new version of store: {}", - partitionCount, + returnPartitionCount, storageQuota, storeName); - return (int) partitionCount; + + return returnPartitionCount; } public static VenicePartitioner getVenicePartitioner(PartitionerConfig config) { diff --git a/internal/venice-common/src/test/java/com/linkedin/venice/meta/BackupStrategyTest.java b/internal/venice-common/src/test/java/com/linkedin/venice/meta/BackupStrategyTest.java new file mode 100644 index 0000000000..29fa48cf72 --- /dev/null +++ b/internal/venice-common/src/test/java/com/linkedin/venice/meta/BackupStrategyTest.java @@ -0,0 +1,23 @@ +package com.linkedin.venice.meta; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertThrows; + +import com.linkedin.venice.exceptions.VeniceException; +import org.testng.annotations.Test; + + +public class BackupStrategyTest { + @Test + public void testFromInt() { + assertEquals(BackupStrategy.fromInt(0), BackupStrategy.KEEP_MIN_VERSIONS); + assertEquals(BackupStrategy.fromInt(1), BackupStrategy.DELETE_ON_NEW_PUSH_START); + assertThrows(VeniceException.class, () -> BackupStrategy.fromInt(2)); + } + + @Test + public void testGetValue() { + assertEquals(BackupStrategy.KEEP_MIN_VERSIONS.getValue(), 0); + assertEquals(BackupStrategy.DELETE_ON_NEW_PUSH_START.getValue(), 1); + } +} diff --git a/internal/venice-common/src/test/java/com/linkedin/venice/meta/TestHybridStoreConfig.java b/internal/venice-common/src/test/java/com/linkedin/venice/meta/TestHybridStoreConfig.java index c4dd2368d6..2f75104798 100644 --- a/internal/venice-common/src/test/java/com/linkedin/venice/meta/TestHybridStoreConfig.java +++ b/internal/venice-common/src/test/java/com/linkedin/venice/meta/TestHybridStoreConfig.java @@ -22,4 +22,26 @@ public void deserializes() throws IOException { Assert.assertEquals(fasterXml.getRewindTimeInSeconds(), 123L); Assert.assertEquals(fasterXml.getDataReplicationPolicy(), DataReplicationPolicy.NON_AGGREGATE); } + + @Test + public void testIsHybrid() { + HybridStoreConfig hybridStoreConfig; + hybridStoreConfig = new HybridStoreConfigImpl(-1, -1, -1, null, null); + Assert.assertFalse(hybridStoreConfig.isHybrid()); + + hybridStoreConfig = new HybridStoreConfigImpl(100, -1, -1, null, null); + Assert.assertFalse(hybridStoreConfig.isHybrid()); + + hybridStoreConfig = new HybridStoreConfigImpl(100, 100, -1, null, null); + Assert.assertTrue(hybridStoreConfig.isHybrid()); + + hybridStoreConfig = new HybridStoreConfigImpl(100, 100, 100, null, null); + Assert.assertTrue(hybridStoreConfig.isHybrid()); + + hybridStoreConfig = new HybridStoreConfigImpl(100, -1, 100, null, null); + Assert.assertTrue(hybridStoreConfig.isHybrid()); + + hybridStoreConfig = new HybridStoreConfigImpl(-1, -1, 100, null, null); + Assert.assertFalse(hybridStoreConfig.isHybrid()); + } } diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForActiveActiveReplication.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForActiveActiveReplication.java index 3a20c7beb4..e4c6c748f2 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForActiveActiveReplication.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForActiveActiveReplication.java @@ 
-98,4 +98,40 @@ public void testClusterLevelActiveActiveReplicationConfigForNewHybridStores() th assertFalse(parentControllerClient.getStore(storeName).getStore().isActiveActiveReplicationEnabled()); }); } + + @Test(timeOut = TEST_TIMEOUT) + public void testClusterLevelActiveActiveReplicationConfigForNewIncrementalPushStores() throws IOException { + String storeName = Utils.getUniqueString("test-store-incremental"); + String pushJobId1 = "test-push-job-id-1"; + parentControllerClient.createNewStore(storeName, "test-owner", "\"string\"", "\"string\""); + parentControllerClient.emptyPush(storeName, pushJobId1, 1); + + // Version 1 should exist. + StoreInfo store = assertCommand(parentControllerClient.getStore(storeName)).getStore(); + assertEquals(store.getVersions().size(), 1); + + // Check store level Active/Active is enabled or not + assertFalse(store.isActiveActiveReplicationEnabled()); + assertFalse(store.isIncrementalPushEnabled()); + assertFalse(store.isActiveActiveReplicationEnabled()); + + // Convert to incremental push store + assertCommand( + parentControllerClient.updateStore(storeName, new UpdateStoreQueryParams().setIncrementalPushEnabled(true))); + TestUtils.waitForNonDeterministicAssertion(TEST_TIMEOUT, TimeUnit.MILLISECONDS, () -> { + StoreInfo storeToTest = parentControllerClient.getStore(storeName).getStore(); + assertTrue(storeToTest.isIncrementalPushEnabled()); + assertTrue(storeToTest.isActiveActiveReplicationEnabled()); + }); + + // After inc push is disabled, even default A/A config for pure hybrid store is false, + // original store A/A config is enabled. + assertCommand( + parentControllerClient.updateStore(storeName, new UpdateStoreQueryParams().setIncrementalPushEnabled(false))); + TestUtils.waitForNonDeterministicAssertion(TEST_TIMEOUT, TimeUnit.MILLISECONDS, () -> { + StoreInfo storeToTest = parentControllerClient.getStore(storeName).getStore(); + assertFalse(storeToTest.isIncrementalPushEnabled()); + assertTrue(storeToTest.isActiveActiveReplicationEnabled()); + }); + } } diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForNativeReplication.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForNativeReplication.java index 7ac63c31d3..15ca295123 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForNativeReplication.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForNativeReplication.java @@ -13,6 +13,7 @@ import com.linkedin.venice.integration.utils.ServiceFactory; import com.linkedin.venice.integration.utils.VeniceMultiRegionClusterCreateOptions; import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; +import com.linkedin.venice.meta.DataReplicationPolicy; import com.linkedin.venice.meta.StoreInfo; import com.linkedin.venice.meta.Version; import com.linkedin.venice.utils.TestUtils; @@ -97,6 +98,7 @@ public void testClusterLevelNativeReplicationConfigForNewStores() { parentControllerClient.updateStore( storeName, new UpdateStoreQueryParams().setIncrementalPushEnabled(true) + .setHybridDataReplicationPolicy(DataReplicationPolicy.NONE) .setHybridRewindSeconds(1L) .setHybridOffsetLagThreshold(10))); TestUtils.waitForNonDeterministicAssertion(TEST_TIMEOUT, TimeUnit.MILLISECONDS, () -> { @@ -107,7 +109,7 @@ public void 
testClusterLevelNativeReplicationConfigForNewStores() { } @Test(timeOut = TEST_TIMEOUT) - public void testConvertHybridDuringPushjob() { + public void testConvertHybridDuringPushJob() { String storeName = Utils.getUniqueString("test-store"); parentControllerClient.createNewStore(storeName, "test-owner", "\"string\"", "\"string\""); parentControllerClient.requestTopicForWrites( @@ -128,7 +130,7 @@ public void testConvertHybridDuringPushjob() { storeName, new UpdateStoreQueryParams().setHybridRewindSeconds(1L).setHybridOffsetLagThreshold(1L)); Assert.assertTrue(response.isError()); - Assert.assertTrue(response.getError().contains("Cannot convert to hybrid as there is already a pushjob running")); + Assert.assertTrue(response.getError().contains("Cannot convert to hybrid as there is already a push job running")); parentControllerClient.killOfflinePushJob(Version.composeKafkaTopic(storeName, 1)); } diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestParentControllerWithMultiDataCenter.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestParentControllerWithMultiDataCenter.java index 5df0c3f9bb..cfda85204d 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestParentControllerWithMultiDataCenter.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestParentControllerWithMultiDataCenter.java @@ -470,8 +470,10 @@ public void testEnableActiveActiveReplicationSchema() { Assert.assertFalse(schemaResponse2.isError(), "addValeSchema returned error: " + schemaResponse2.getError()); // Enable AA on store - UpdateStoreQueryParams updateStoreToEnableAARepl = - new UpdateStoreQueryParams().setNativeReplicationEnabled(true).setActiveActiveReplicationEnabled(true); + UpdateStoreQueryParams updateStoreToEnableAARepl = new UpdateStoreQueryParams().setNativeReplicationEnabled(true) + .setActiveActiveReplicationEnabled(true) + .setHybridOffsetLagThreshold(1000) + .setHybridRewindSeconds(1000); TestWriteUtils.updateStore(storeName, parentControllerClient, updateStoreToEnableAARepl); /** * Test Active/Active replication config enablement generates the active active metadata schema. 
diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestVeniceHelixAdminWithSharedEnvironment.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestVeniceHelixAdminWithSharedEnvironment.java index 652ed7b389..061ef757a5 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestVeniceHelixAdminWithSharedEnvironment.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestVeniceHelixAdminWithSharedEnvironment.java @@ -483,27 +483,6 @@ public void testUpdateStoreMetadata() throws Exception { PartitionerConfig partitionerConfig = new PartitionerConfigImpl(); veniceAdmin.setStorePartitionerConfig(clusterName, storeName, partitionerConfig); - veniceAdmin.setIncrementalPushEnabled(clusterName, storeName, true); - Assert.assertTrue(veniceAdmin.getStore(clusterName, storeName).isIncrementalPushEnabled()); - - veniceAdmin.setBootstrapToOnlineTimeoutInHours(clusterName, storeName, 48); - Assert.assertEquals(veniceAdmin.getStore(clusterName, storeName).getBootstrapToOnlineTimeoutInHours(), 48); - - veniceAdmin.setHybridStoreDiskQuotaEnabled(clusterName, storeName, true); - Assert.assertTrue(veniceAdmin.getStore(clusterName, storeName).isHybridStoreDiskQuotaEnabled()); - - // test setting per-store RMD (replication metadata) version ID - int rmdVersion = veniceAdmin.getStore(clusterName, storeName).getRmdVersion(); - Assert.assertEquals(rmdVersion, -1); - - veniceAdmin.setReplicationMetadataVersionID(clusterName, storeName, 2); - rmdVersion = veniceAdmin.getStore(clusterName, storeName).getRmdVersion(); - Assert.assertNotEquals(rmdVersion, -1); - Assert.assertEquals(rmdVersion, 2); - - // test hybrid config - // set incrementalPushEnabled to be false as hybrid and incremental are mutex - veniceAdmin.setIncrementalPushEnabled(clusterName, storeName, false); Assert.assertFalse(veniceAdmin.getStore(clusterName, storeName).isHybrid()); veniceAdmin.updateStore( clusterName, @@ -648,26 +627,18 @@ public void testGetRealTimeTopic() { Assert.assertThrows(VeniceNoStoreException.class, () -> veniceAdmin.getRealTimeTopic(clusterName, storeName)); veniceAdmin.createStore(clusterName, storeName, "owner", KEY_SCHEMA, VALUE_SCHEMA); + + // Must not be able to get a real time topic if the store is not hybrid + Assert.assertThrows(VeniceException.class, () -> veniceAdmin.getRealTimeTopic(clusterName, storeName)); + veniceAdmin.updateStore( clusterName, storeName, new UpdateStoreQueryParams().setHybridRewindSeconds(25L).setHybridOffsetLagThreshold(100L)); // make store // hybrid - try { - veniceAdmin.getRealTimeTopic(clusterName, storeName); - Assert.fail("Must not be able to get a real time topic until the store is initialized with a version"); - } catch (VeniceException e) { - Assert.assertTrue( - e.getMessage().contains("is not initialized with a version"), - "Got unexpected error message: " + e.getMessage()); - } - - int partitions = 2; // TODO verify partition count for RT topic. 
- veniceAdmin.incrementVersionIdempotent(clusterName, storeName, Version.guidBasedDummyPushId(), partitions, 1); - - String rtTopic = veniceAdmin.getRealTimeTopic(clusterName, storeName); - Assert.assertEquals(rtTopic, Version.composeRealTimeTopic(storeName)); + String expectedRtTopic = Version.composeRealTimeTopic(storeName); + Assert.assertEquals(veniceAdmin.getRealTimeTopic(clusterName, storeName), expectedRtTopic); } @Test(timeOut = TOTAL_TIMEOUT_FOR_LONG_TEST_MS) @@ -1471,33 +1442,27 @@ public void leakyTopicTruncation() { } } - @Test(timeOut = TOTAL_TIMEOUT_FOR_LONG_TEST_MS) - public void testSetLargestUsedVersion() { - String storeName = "testSetLargestUsedVersion"; - veniceAdmin.createStore(clusterName, storeName, storeOwner, KEY_SCHEMA, VALUE_SCHEMA); - Store store = veniceAdmin.getStore(clusterName, storeName); - Assert.assertEquals(store.getLargestUsedVersionNumber(), 0); - - Version version = - veniceAdmin.incrementVersionIdempotent(clusterName, storeName, Version.guidBasedDummyPushId(), 1, 1); - store = veniceAdmin.getStore(clusterName, storeName); - Assert.assertTrue(version.getNumber() > 0); - Assert.assertEquals(store.getLargestUsedVersionNumber(), version.getNumber()); - - veniceAdmin.setStoreLargestUsedVersion(clusterName, storeName, 0); - store = veniceAdmin.getStore(clusterName, storeName); - Assert.assertEquals(store.getLargestUsedVersionNumber(), 0); - } - @Test(timeOut = TOTAL_TIMEOUT_FOR_LONG_TEST_MS) public void testWriteComputationEnabled() { String storeName = Utils.getUniqueString("test_store"); - veniceAdmin.createStore(clusterName, storeName, storeOwner, "\"string\"", "\"string\""); + String VALUE_FIELD_NAME = "int_field"; + String SECOND_VALUE_FIELD_NAME = "opt_int_field"; + String VALUE_SCHEMA_V2_STR = "{\n" + "\"type\": \"record\",\n" + "\"name\": \"TestValueSchema\",\n" + + "\"namespace\": \"com.linkedin.venice.fastclient.schema\",\n" + "\"fields\": [\n" + " {\"name\": \"" + + VALUE_FIELD_NAME + "\", \"type\": \"int\", \"default\": 10},\n" + "{\"name\": \"" + SECOND_VALUE_FIELD_NAME + + "\", \"type\": [\"null\", \"int\"], \"default\": null}]\n" + "}"; + + veniceAdmin.createStore(clusterName, storeName, storeOwner, "\"string\"", VALUE_SCHEMA_V2_STR); Store store = veniceAdmin.getStore(clusterName, storeName); Assert.assertFalse(store.isWriteComputationEnabled()); - veniceAdmin.updateStore(clusterName, storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true)); + veniceAdmin.updateStore( + clusterName, + storeName, + new UpdateStoreQueryParams().setHybridRewindSeconds(1000) + .setHybridOffsetLagThreshold(1000) + .setWriteComputationEnabled(true)); store = veniceAdmin.getStore(clusterName, storeName); Assert.assertTrue(store.isWriteComputationEnabled()); } @@ -1748,10 +1713,15 @@ public void testVersionLevelActiveActiveReplicationConfig() { String pushJobId1 = "test-push-job-id-1"; veniceAdmin.createStore(clusterName, storeName, "test-owner", KEY_SCHEMA, VALUE_SCHEMA); /** - * Enable L/F and Active/Active replication + * Enable L/F, NR and Active/Active replication */ - veniceAdmin - .updateStore(clusterName, storeName, new UpdateStoreQueryParams().setActiveActiveReplicationEnabled(true)); + veniceAdmin.updateStore( + clusterName, + storeName, + new UpdateStoreQueryParams().setNativeReplicationEnabled(true) + .setActiveActiveReplicationEnabled(true) + .setHybridOffsetLagThreshold(1000) + .setHybridRewindSeconds(1000)); /** * Add version 1 @@ -1901,7 +1871,10 @@ public void testUpdateStoreWithVersionInheritedConfigs() { veniceAdmin.updateStore( 
clusterName, storeName, - new UpdateStoreQueryParams().setHybridOffsetLagThreshold(1) + new UpdateStoreQueryParams().setNativeReplicationEnabled(true) + .setActiveActiveReplicationEnabled(true) + .setChunkingEnabled(true) + .setHybridOffsetLagThreshold(1) .setHybridRewindSeconds(1) .setStoreViews(viewConfig)); veniceAdmin.incrementVersionIdempotent(clusterName, storeName, Version.guidBasedDummyPushId(), 1, 1); diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/VeniceParentHelixAdminTest.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/VeniceParentHelixAdminTest.java index c95cfe579f..321cef6c55 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/VeniceParentHelixAdminTest.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/VeniceParentHelixAdminTest.java @@ -2,7 +2,7 @@ import static com.linkedin.venice.ConfigKeys.CONTROLLER_AUTO_MATERIALIZE_DAVINCI_PUSH_STATUS_SYSTEM_STORE; import static com.linkedin.venice.ConfigKeys.CONTROLLER_AUTO_MATERIALIZE_META_SYSTEM_STORE; -import static com.linkedin.venice.ConfigKeys.CONTROLLER_PARENT_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED; +import static com.linkedin.venice.ConfigKeys.CONTROLLER_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED; import static com.linkedin.venice.ConfigKeys.TERMINAL_STATE_TOPIC_CHECK_DELAY_MS; import static com.linkedin.venice.ConfigKeys.TOPIC_CLEANUP_SLEEP_INTERVAL_BETWEEN_TOPIC_LIST_FETCH_MS; import static com.linkedin.venice.controller.SchemaConstants.BAD_VALUE_SCHEMA_FOR_WRITE_COMPUTE_V2; @@ -33,7 +33,6 @@ import com.linkedin.venice.controllerapi.VersionCreationResponse; import com.linkedin.venice.integration.utils.ServiceFactory; import com.linkedin.venice.integration.utils.VeniceClusterWrapper; -import com.linkedin.venice.integration.utils.VeniceControllerWrapper; import com.linkedin.venice.integration.utils.VeniceMultiRegionClusterCreateOptions; import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; import com.linkedin.venice.meta.ETLStoreConfig; @@ -346,7 +345,7 @@ public void testHybridAndETLStoreConfig() { Assert.assertFalse(etlStoreConfig.isFutureVersionETLEnabled()); Assert.assertTrue( controllerResponse.getError() - .contains("Cannot enable ETL for this store " + "because etled user proxy account is not set")); + .contains("Cannot enable ETL for this store because etled user proxy account is not set")); // test enabling ETL with empty proxy account, expected failure params = new UpdateStoreQueryParams(); @@ -358,7 +357,7 @@ public void testHybridAndETLStoreConfig() { Assert.assertFalse(etlStoreConfig.isFutureVersionETLEnabled()); Assert.assertTrue( controllerResponse.getError() - .contains("Cannot enable ETL for this store " + "because etled user proxy account is not set")); + .contains("Cannot enable ETL for this store because etled user proxy account is not set")); // test enabling ETL with etl proxy account, expected success params = new UpdateStoreQueryParams(); @@ -405,22 +404,20 @@ public void testSupersetSchemaWithCustomSupersetSchemaGenerator() throws IOExcep // This cluster setup don't have server, we cannot perform push here. 
properties.setProperty(CONTROLLER_AUTO_MATERIALIZE_META_SYSTEM_STORE, String.valueOf(false)); properties.setProperty(CONTROLLER_AUTO_MATERIALIZE_DAVINCI_PUSH_STATUS_SYSTEM_STORE, String.valueOf(false)); - properties.setProperty(CONTROLLER_PARENT_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED, String.valueOf(true)); - properties - .put(VeniceControllerWrapper.SUPERSET_SCHEMA_GENERATOR, new SupersetSchemaGeneratorWithCustomProp(CUSTOM_PROP)); + properties.setProperty(CONTROLLER_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED, String.valueOf(true)); try (VeniceTwoLayerMultiRegionMultiClusterWrapper twoLayerMultiRegionMultiClusterWrapper = ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( - 1, - 1, - 1, - 1, - 0, - 0, - 1, - Optional.of(properties), - Optional.empty(), - Optional.empty())) { + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) + .numberOfClusters(1) + .numberOfParentControllers(1) + .numberOfChildControllers(1) + .numberOfServers(0) + .numberOfRouters(0) + .replicationFactor(1) + .parentControllerProperties(properties) + .supersetSchemaGenerator(new SupersetSchemaGeneratorWithCustomProp(CUSTOM_PROP)) + .build())) { String parentControllerUrl = twoLayerMultiRegionMultiClusterWrapper.getControllerConnectString(); try (ControllerClient parentControllerClient = new ControllerClient(twoLayerMultiRegionMultiClusterWrapper.getClusterNames()[0], parentControllerUrl)) { @@ -440,8 +437,11 @@ public void testSupersetSchemaWithCustomSupersetSchemaGenerator() throws IOExcep Assert.assertNotNull(newStoreResponse); Assert.assertFalse(newStoreResponse.isError(), "error in newStoreResponse: " + newStoreResponse.getError()); // Enable write compute - ControllerResponse updateStoreResponse = parentControllerClient - .updateStore(storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true)); + ControllerResponse updateStoreResponse = parentControllerClient.updateStore( + storeName, + new UpdateStoreQueryParams().setHybridRewindSeconds(86400) + .setHybridOffsetLagThreshold(1000) + .setWriteComputationEnabled(true)); Assert.assertFalse(updateStoreResponse.isError()); MultiSchemaResponse schemaResponse = parentControllerClient.getAllValueSchema(storeName); @@ -562,25 +562,26 @@ public void testStoreMetaDataUpdateFromParentToChildController( parentControllerProps .setProperty(CONTROLLER_AUTO_MATERIALIZE_DAVINCI_PUSH_STATUS_SYSTEM_STORE, String.valueOf(false)); if (isSupersetSchemaGeneratorEnabled) { - parentControllerProps - .setProperty(CONTROLLER_PARENT_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED, String.valueOf(true)); - parentControllerProps.put( - VeniceControllerWrapper.SUPERSET_SCHEMA_GENERATOR, - new SupersetSchemaGeneratorWithCustomProp("test_prop")); + parentControllerProps.setProperty(CONTROLLER_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED, String.valueOf(true)); + } + + VeniceMultiRegionClusterCreateOptions.Builder options = + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) + .numberOfClusters(1) + .numberOfParentControllers(1) + .numberOfChildControllers(1) + .numberOfServers(0) + .numberOfRouters(0) + .replicationFactor(1) + .parentControllerProperties(parentControllerProps) + .sslToKafka(isControllerSslEnabled); + + if (isSupersetSchemaGeneratorEnabled) { + options = options.supersetSchemaGenerator(new SupersetSchemaGeneratorWithCustomProp("test_prop")); } try (VeniceTwoLayerMultiRegionMultiClusterWrapper venice = - ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( - new 
VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) - .numberOfClusters(1) - .numberOfParentControllers(1) - .numberOfChildControllers(1) - .numberOfServers(0) - .numberOfRouters(0) - .replicationFactor(1) - .parentControllerProperties(parentControllerProps) - .sslToKafka(isControllerSslEnabled) - .build())) { + ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper(options.build())) { String childControllerUrl = venice.getChildRegions().get(0).getControllerConnectString(); String parentControllerUrl = venice.getControllerConnectString(); Optional sslFactory = @@ -1036,7 +1037,9 @@ private void testWriteComputeSchemaAutoGenerationFailure(ControllerClient parent private void validateEnablingWriteComputeFailed(String storeName, ControllerClient parentControllerClient) { UpdateStoreQueryParams updateStoreQueryParams = new UpdateStoreQueryParams(); - updateStoreQueryParams.setWriteComputationEnabled(true); + updateStoreQueryParams.setHybridRewindSeconds(86400) + .setHybridOffsetLagThreshold(1000) + .setWriteComputationEnabled(true); ControllerResponse response = parentControllerClient.updateStore(storeName, updateStoreQueryParams); Assert.assertTrue( response.isError(), @@ -1063,7 +1066,9 @@ private void testWriteComputeSchemaAutoGeneration(ControllerClient parentControl // Step 2. Update this store to enable write compute. UpdateStoreQueryParams updateStoreQueryParams = new UpdateStoreQueryParams(); - updateStoreQueryParams.setWriteComputationEnabled(true); + updateStoreQueryParams.setHybridOffsetLagThreshold(1000) + .setHybridRewindSeconds(86400) + .setWriteComputationEnabled(true); parentControllerClient.updateStore(storeName, updateStoreQueryParams); // Step 3. Get value schema and write compute schema generated by the controller. @@ -1126,7 +1131,9 @@ private void testWriteComputeSchemaEnable(ControllerClient parentControllerClien // Step 2. Update this store to enable write compute. UpdateStoreQueryParams updateStoreQueryParams = new UpdateStoreQueryParams(); - updateStoreQueryParams.setWriteComputationEnabled(true); + updateStoreQueryParams.setHybridOffsetLagThreshold(1000) + .setHybridRewindSeconds(86400) + .setWriteComputationEnabled(true); parentControllerClient.updateStore(storeName, updateStoreQueryParams); // Could not enable write compute bad schema did not have defaults @@ -1157,6 +1164,13 @@ private void testSupersetSchemaGenerationWithUpdateDefaultValue(ControllerClient // Step 1. Create a store with missing default fields schema parentControllerClient .createNewStore(storeName, owner, keySchemaStr, TestWriteUtils.UNION_RECORD_V1_SCHEMA.toString()); + + // Step 2. Update this store to make it hybrid + assertCommand( + parentControllerClient.updateStore( + storeName, + new UpdateStoreQueryParams().setHybridOffsetLagThreshold(1000).setHybridRewindSeconds(86400))); + MultiSchemaResponse valueAndWriteComputeSchemaResponse = parentControllerClient.getAllValueAndDerivedSchema(storeName); MultiSchemaResponse.Schema[] registeredSchemas = valueAndWriteComputeSchemaResponse.getSchemas(); @@ -1164,17 +1178,15 @@ private void testSupersetSchemaGenerationWithUpdateDefaultValue(ControllerClient MultiSchemaResponse.Schema registeredSchema = registeredSchemas[0]; Assert.assertFalse(registeredSchema.isDerivedSchema()); // No write compute schema yet. - // Step 2. Update this store to enable write compute. 
- UpdateStoreQueryParams updateStoreQueryParams = new UpdateStoreQueryParams(); - updateStoreQueryParams.setWriteComputationEnabled(true); - parentControllerClient.updateStore(storeName, updateStoreQueryParams); + // Step 3. Update this store to enable write compute. + parentControllerClient.updateStore(storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true)); // Could not enable write compute bad schema did not have defaults StoreInfo store = parentControllerClient.getStore(storeName).getStore(); Assert.assertTrue(store.isWriteComputationEnabled()); Assert.assertEquals(store.getLatestSuperSetValueSchemaId(), 1); - // Step 3. Add a valid latest value schema for write-compute + // Step 4. Add a valid latest value schema for write-compute parentControllerClient.addValueSchema(storeName, TestWriteUtils.UNION_RECORD_V2_SCHEMA.toString()); TestUtils.waitForNonDeterministicAssertion( 30, diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PartialUpdateTest.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PartialUpdateTest.java index ebab678294..d6691bc1f4 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PartialUpdateTest.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PartialUpdateTest.java @@ -78,6 +78,7 @@ import com.linkedin.venice.integration.utils.VeniceMultiClusterWrapper; import com.linkedin.venice.integration.utils.VeniceServerWrapper; import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; +import com.linkedin.venice.meta.DataReplicationPolicy; import com.linkedin.venice.meta.ReadOnlySchemaRepository; import com.linkedin.venice.meta.Store; import com.linkedin.venice.meta.Version; @@ -345,6 +346,7 @@ public void testIncrementalPushPartialUpdateClassicFormat() throws IOException { .setWriteComputationEnabled(true) .setChunkingEnabled(true) .setIncrementalPushEnabled(true) + .setHybridDataReplicationPolicy(DataReplicationPolicy.NONE) .setHybridRewindSeconds(10L) .setHybridOffsetLagThreshold(2L); ControllerResponse updateStoreResponse = @@ -413,6 +415,7 @@ public void testIncrementalPushPartialUpdateNewFormat(boolean useSparkCompute) t .setWriteComputationEnabled(true) .setChunkingEnabled(true) .setIncrementalPushEnabled(true) + .setHybridDataReplicationPolicy(DataReplicationPolicy.NONE) .setHybridRewindSeconds(10L) .setHybridOffsetLagThreshold(2L); ControllerResponse updateStoreResponse = diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PushJobDetailsTest.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PushJobDetailsTest.java index 33a0d183a1..2863408ac0 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PushJobDetailsTest.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PushJobDetailsTest.java @@ -39,6 +39,7 @@ import com.linkedin.venice.integration.utils.VeniceClusterWrapper; import com.linkedin.venice.integration.utils.VeniceMultiClusterWrapper; import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; +import com.linkedin.venice.meta.Store; import com.linkedin.venice.meta.Version; import com.linkedin.venice.status.PushJobDetailsStatus; import com.linkedin.venice.status.protocol.PushJobDetails; @@ -338,7 +339,9 @@ public void testPushJobDetails(boolean 
useCustomCheckpoints) throws IOException // because hadoop job client cannot fetch counters properly. parentControllerClient.updateStore( testStoreName, - new UpdateStoreQueryParams().setStorageQuotaInByte(-1).setPartitionCount(2).setIncrementalPushEnabled(true)); + new UpdateStoreQueryParams().setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA) + .setPartitionCount(2) + .setIncrementalPushEnabled(true)); Properties pushJobProps = defaultVPJProps(multiRegionMultiClusterWrapper, inputDirPathForFullPush, testStoreName); pushJobProps.setProperty(PUSH_JOB_STATUS_UPLOAD_ENABLE, String.valueOf(true)); try (VenicePushJob testPushJob = new VenicePushJob("test-push-job-details-job", pushJobProps)) { @@ -374,7 +377,7 @@ public void testPushJobDetails(boolean useCustomCheckpoints) throws IOException // case 3: failed batch push job, non-user error: // setting the quota to be 0, hadoop job client cannot fetch counters properly and should fail the job - parentControllerClient.updateStore(testStoreName, new UpdateStoreQueryParams().setStorageQuotaInByte(0)); + parentControllerClient.updateStore(testStoreName, new UpdateStoreQueryParams().setStorageQuotaInByte(1)); try (VenicePushJob testPushJob = new VenicePushJob("test-push-job-details-job-v2", pushJobProps)) { assertThrows(VeniceException.class, testPushJob::run); } @@ -413,7 +416,8 @@ public void testPushJobDetails(boolean useCustomCheckpoints) throws IOException validatePushJobMetrics(false, false, true, metricsExpectedCount); // case 5: failed batch push job, user error: data with duplicate keys - UpdateStoreQueryParams queryParams = new UpdateStoreQueryParams().setStorageQuotaInByte(-1); + UpdateStoreQueryParams queryParams = + new UpdateStoreQueryParams().setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA); parentControllerClient.updateStore(testStoreName, queryParams); pushJobProps = defaultVPJProps(multiRegionMultiClusterWrapper, inputDirPathWithDupKeys, testStoreName); diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PushStatusStoreMultiColoTest.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PushStatusStoreMultiColoTest.java index 22b2d801b8..bdcf1f0c45 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PushStatusStoreMultiColoTest.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PushStatusStoreMultiColoTest.java @@ -111,7 +111,7 @@ public void cleanUp() { Utils.closeQuietlyWithErrorLogged(multiRegionMultiClusterWrapper); } - public void setUpStore() { + private void setUpStore() { storeName = Utils.getUniqueString("store"); String owner = "test"; // set up push status store. 
diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestBatch.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestBatch.java index d95accd2d2..f425965d5c 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestBatch.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestBatch.java @@ -114,8 +114,6 @@ import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; -//TODO: write a VPJWrapper that can handle the whole flow - @Test(singleThreaded = true) public abstract class TestBatch { @@ -225,18 +223,12 @@ public void testDataPushWithSchemaWithAWrongDefault() { inputDir -> new KeyAndValueSchemas(writeSimpleAvroFileWithASchemaWithAWrongDefaultValue(inputDir, recordCnt)), properties -> {}, (avroClient, vsonClient, metricsRepository) -> { - for (int i = 0; i < recordCnt; i++) { - Object valueObject = avroClient.get(Integer.toString(i)).get(); - Assert.assertTrue( - valueObject instanceof GenericRecord, - "The returned value must be a ''GenericRecord' for key: " + i); - GenericRecord value = (GenericRecord) valueObject; - Assert.assertEquals(value.get(DEFAULT_KEY_FIELD_PROP).toString(), Integer.toString(i)); - Assert.assertEquals(Float.valueOf(value.get("score").toString()), 100.0f); - } + Assert.fail("Store creation should have failed"); }); + } catch (AssertionError e) { + Assert.assertTrue(e.getMessage().contains("Invalid default for field")); } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("Could not create store")); + Assert.fail("Unexpected exception", e); } } @@ -471,7 +463,7 @@ public void testIncrementalPush() throws Exception { } }, storeName, - new UpdateStoreQueryParams().setIncrementalPushEnabled(true)); + null); testBatchStore( inputDir -> new KeyAndValueSchemas(writeSimpleAvroFileWithStringToStringSchema(inputDir)), @@ -483,7 +475,7 @@ public void testIncrementalPush() throws Exception { } }, storeName, - new UpdateStoreQueryParams().setIncrementalPushEnabled(true)); + null); } @Test(timeOut = TEST_TIMEOUT, dataProvider = "Two-True-and-False", dataProviderClass = DataProviderUtils.class) @@ -525,59 +517,6 @@ public void testIncrementalPushWithCompression( null); } - @Test(timeOut = TEST_TIMEOUT) - public void testIncrementalPushWritesToRealTimeTopicWithPolicy() throws Exception { - double randomNumber = Math.random(); - String classAndFunctionName = getClass().getSimpleName() + ".testIncrementalPushWritesToRealTimeTopicWithPolicy()"; - String uniqueTestId = "attempt [" + randomNumber + "] of " + classAndFunctionName; - LOGGER.info("Start of {}", uniqueTestId); - try { - String storeName = testBatchStore( - inputDir -> new KeyAndValueSchemas(writeSimpleAvroFileWithStringToStringSchema(inputDir)), - properties -> {}, - (avroClient, vsonClient, metricsRepository) -> { - for (int i = 1; i <= 100; i++) { - Assert.assertEquals(avroClient.get(Integer.toString(i)).get().toString(), "test_name_" + i); - } - }, - new UpdateStoreQueryParams().setIncrementalPushEnabled(true) - .setChunkingEnabled(true) - .setHybridOffsetLagThreshold(10) - .setHybridRewindSeconds(0)); - - testBatchStore( - inputDir -> new KeyAndValueSchemas(writeSimpleAvroFileWithStringToStringSchema2(inputDir)), - properties -> properties.setProperty(INCREMENTAL_PUSH, "true"), - (avroClient, vsonClient, metricsRepository) -> { - for (int i = 51; i <= 150; i++) { - 
Assert.assertEquals(avroClient.get(Integer.toString(i)).get().toString(), "test_name_" + (i * 2)); - } - }, - storeName, - null); - - testBatchStore( - inputDir -> new KeyAndValueSchemas(writeSimpleAvroFileWithStringToStringSchema(inputDir)), - properties -> {}, - (avroClient, vsonClient, metricsRepository) -> { - TestUtils.waitForNonDeterministicAssertion(30, TimeUnit.SECONDS, true, () -> { - for (int i = 1; i <= 100; i++) { - Assert.assertEquals(avroClient.get(Integer.toString(i)).get().toString(), "test_name_" + i); - } - for (int i = 101; i <= 150; i++) { - Assert.assertNull(avroClient.get(Integer.toString(i)).get()); - } - }); - }, - storeName, - null); - LOGGER.info("Successful end of {}", uniqueTestId); - } catch (Throwable e) { - LOGGER.error("Caught throwable in {}", uniqueTestId, e); - throw e; - } - } - @Test(timeOut = TEST_TIMEOUT) public void testMetaStoreSchemaValidation() throws Exception { String storeName = testBatchStore( diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithEmergencySourceRegionSelection.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithEmergencySourceRegionSelection.java index a4113f88b7..0c3d05ccb2 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithEmergencySourceRegionSelection.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithEmergencySourceRegionSelection.java @@ -129,8 +129,7 @@ public void testNativeReplicationForBatchPushWithEmergencySourceOverride(int rec new UpdateStoreQueryParams().setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA) .setPartitionCount(partitionCount) .setNativeReplicationEnabled(true) - .setNativeReplicationSourceFabric("dc-1") - .setActiveActiveReplicationEnabled(true); + .setNativeReplicationSourceFabric("dc-1"); createStoreForJob(clusterName, keySchemaStr, valueSchemaStr, props, updateStoreParams).close(); @@ -150,7 +149,7 @@ public void testNativeReplicationForBatchPushWithEmergencySourceOverride(int rec * Check the update store command in parent controller has been propagated into child controllers, before * sending any commands directly into child controllers, which can help avoid race conditions. 
*/ - TestUtils.verifyDCConfigNativeAndActiveRepl(storeName, true, true, dc0Client, dc1Client, dc2Client); + TestUtils.verifyDCConfigNativeAndActiveRepl(storeName, true, false, dc0Client, dc1Client, dc2Client); } try (VenicePushJob job = new VenicePushJob("Test push job", props)) { diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithNativeReplication.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithNativeReplication.java index c3540d0916..a88667acfd 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithNativeReplication.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithNativeReplication.java @@ -481,7 +481,8 @@ public void testNativeReplicationForIncrementalPush() throws Exception { updateStoreQueryParams -> updateStoreQueryParams.setPartitionCount(1) .setHybridOffsetLagThreshold(TEST_TIMEOUT) .setHybridRewindSeconds(2L) - .setIncrementalPushEnabled(true), + .setIncrementalPushEnabled(true) + .setHybridDataReplicationPolicy(DataReplicationPolicy.NONE), 100, (parentControllerClient, clusterName, storeName, props, inputDir) -> { try (VenicePushJob job = new VenicePushJob("Batch Push", props)) { @@ -614,11 +615,7 @@ public void testMultiDataCenterRePushWithIncrementalPush() throws Exception { String incPushToRTVersion = System.currentTimeMillis() + "_test_inc_push_to_rt"; VeniceControllerWrapper parentController = parentControllers.stream().filter(c -> c.isLeaderController(clusterName)).findAny().get(); - incPushToRTWriter = startIncrementalPush( - parentControllerClient, - storeName, - parentController.getVeniceAdmin().getVeniceWriterFactory(), - incPushToRTVersion); + incPushToRTWriter = startIncrementalPush(parentControllerClient, storeName, incPushToRTVersion); final String newVersionTopic = Version.composeKafkaTopic( storeName, parentControllerClient.getStore(storeName).getStore().getLargestUsedVersionNumber()); @@ -1007,7 +1004,6 @@ private void assertStoreHealth(ControllerClient controllerClient, String systemS private VeniceWriter startIncrementalPush( ControllerClient controllerClient, String storeName, - VeniceWriterFactory veniceWriterFactory, String incrementalPushVersion) { VersionCreationResponse response = controllerClient.requestTopicForWrites( storeName, @@ -1024,8 +1020,9 @@ private VeniceWriter startIncrementalPush( -1); assertFalse(response.isError()); Assert.assertNotNull(response.getKafkaTopic()); + VeniceWriterFactory veniceWriterFactory = new VeniceWriterFactory(new Properties(), null, null); VeniceWriter veniceWriter = veniceWriterFactory.createVeniceWriter( - new VeniceWriterOptions.Builder(response.getKafkaTopic()) + new VeniceWriterOptions.Builder(response.getKafkaTopic()).setBrokerAddress(response.getKafkaBootstrapServers()) .setKeySerializer(new VeniceAvroKafkaSerializer(STRING_SCHEMA.toString())) .setValueSerializer(new VeniceAvroKafkaSerializer(STRING_SCHEMA.toString())) .build()); diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreMigration.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreMigration.java index 1748ecdc11..cff079b641 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreMigration.java +++ 
b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreMigration.java @@ -72,12 +72,12 @@ import com.linkedin.venice.utils.Utils; import com.linkedin.venice.utils.VeniceProperties; import java.io.File; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; -import java.util.HashSet; +import java.util.List; import java.util.Optional; import java.util.Properties; -import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; @@ -372,7 +372,7 @@ public void testStoreMigrationWithDaVinciPushStatusSystemStore() throws Exceptio StoreMigrationTestUtil.startMigration(parentControllerUrl, storeName, srcClusterName, destClusterName); // Store migration status output via closure PrintFunction - Set statusOutput = new HashSet(); + List statusOutput = new ArrayList<>(); PrintFunction printFunction = (message) -> { statusOutput.add(message.trim()); System.err.println(message); @@ -406,21 +406,22 @@ public void testStoreMigrationWithDaVinciPushStatusSystemStore() throws Exceptio .assertEquals(pushStatusStoreReader.getPartitionStatus(storeName, 1, 0, Optional.empty()).size(), 1)); // Verify that store and system store only exist in destination cluster after ending migration - statusOutput.clear(); StoreMigrationTestUtil .endMigration(parentControllerUrl, childControllerUrl0, storeName, srcClusterName, destClusterName); - StoreMigrationTestUtil - .checkMigrationStatus(parentControllerUrl, storeName, srcClusterName, destClusterName, printFunction); - Assert.assertFalse( - statusOutput.contains(String.format("%s exists in this cluster %s", storeName, srcClusterName)), - statusOutput.toString()); - Assert - .assertTrue(statusOutput.contains(String.format("%s exists in this cluster %s", storeName, destClusterName))); - Assert.assertFalse( - statusOutput.contains(String.format("%s exists in this cluster %s", systemStoreName, srcClusterName))); - Assert.assertTrue( - statusOutput.contains(String.format("%s exists in this cluster %s", systemStoreName, destClusterName))); + TestUtils.waitForNonDeterministicAssertion(30, TimeUnit.SECONDS, () -> { + statusOutput.clear(); + StoreMigrationTestUtil + .checkMigrationStatus(parentControllerUrl, storeName, srcClusterName, destClusterName, printFunction); + Assert.assertFalse( + statusOutput.contains(String.format("%s exists in this cluster %s", storeName, srcClusterName))); + Assert.assertTrue( + statusOutput.contains(String.format("%s exists in this cluster %s", storeName, destClusterName))); + Assert.assertFalse( + statusOutput.contains(String.format("%s exists in this cluster %s", systemStoreName, srcClusterName))); + Assert.assertTrue( + statusOutput.contains(String.format("%s exists in this cluster %s", systemStoreName, destClusterName))); + }); } finally { Utils.closeQuietlyWithErrorLogged(pushStatusStoreReader); D2ClientUtils.shutdownClient(d2Client); @@ -661,7 +662,7 @@ public void testStoreMigrationAfterFailedAttempt() throws Exception { () -> assertTrue(srcParentControllerClient.getStore(storeName).getStore().isMigrating())); // Store migration status output via closure PrintFunction - Set statusOutput = new HashSet(); + List statusOutput = new ArrayList<>(); AdminTool.PrintFunction printFunction = (message) -> { statusOutput.add(message.trim()); System.err.println(message); diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreUpdateStoragePersona.java 
b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreUpdateStoragePersona.java index 99e40b2b5c..5c752361e2 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreUpdateStoragePersona.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreUpdateStoragePersona.java @@ -8,8 +8,8 @@ import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; import com.linkedin.venice.exceptions.VeniceException; import com.linkedin.venice.integration.utils.ServiceFactory; -import com.linkedin.venice.integration.utils.VeniceMultiRegionClusterCreateOptions; -import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; +import com.linkedin.venice.integration.utils.VeniceClusterCreateOptions; +import com.linkedin.venice.integration.utils.VeniceClusterWrapper; import com.linkedin.venice.meta.Store; import com.linkedin.venice.persona.StoragePersona; import com.linkedin.venice.utils.TestStoragePersonaUtils; @@ -26,23 +26,20 @@ public class TestStoreUpdateStoragePersona { - // Ideally this should work with a single region cluster, but today persona only works with a multi region cluster - private VeniceTwoLayerMultiRegionMultiClusterWrapper venice; + private VeniceClusterWrapper venice; private ControllerClient controllerClient; @BeforeClass(alwaysRun = true) public void setUp() { - venice = ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( - new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) - .numberOfParentControllers(1) - .numberOfChildControllers(1) + venice = ServiceFactory.getVeniceCluster( + new VeniceClusterCreateOptions.Builder().numberOfControllers(1) .numberOfServers(1) .numberOfRouters(1) .replicationFactor(2) .sslToStorageNodes(false) .sslToKafka(false) .build()); - controllerClient = new ControllerClient(venice.getClusterNames()[0], venice.getControllerConnectString()); + controllerClient = new ControllerClient(venice.getClusterName(), venice.getAllControllersURLs()); } @AfterClass(alwaysRun = true) diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestWritePathComputation.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestWritePathComputation.java index 60da54bae1..e89572aa7a 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestWritePathComputation.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestWritePathComputation.java @@ -29,7 +29,7 @@ public class TestWritePathComputation { + "\"fields\": [\n" + " {\"name\": \"" + VALUE_FIELD_NAME + "\", \"type\": \"int\", \"default\": 10},\n" + "{\"name\": \"" + SECOND_VALUE_FIELD_NAME + "\", \"type\": [\"null\", \"int\"], \"default\": null}]\n" + "}"; - @Test(timeOut = 60 * Time.MS_PER_SECOND) + @Test(timeOut = 90 * Time.MS_PER_SECOND) public void testFeatureFlagSingleDC() { VeniceMultiClusterCreateOptions options = new VeniceMultiClusterCreateOptions.Builder().numberOfClusters(1) .numberOfControllers(1) @@ -41,23 +41,51 @@ public void testFeatureFlagSingleDC() { String clusterName = multiClusterWrapper.getClusterNames()[0]; VeniceControllerWrapper childController = multiClusterWrapper.getLeaderController(clusterName); String storeName = "test-store0"; + String storeName2 = "test-store2"; // Create store Admin childAdmin = multiClusterWrapper.getLeaderController(clusterName, 
GET_LEADER_CONTROLLER_TIMEOUT).getVeniceAdmin(); childAdmin.createStore(clusterName, storeName, "tester", "\"string\"", "\"string\""); + childAdmin.createStore(clusterName, storeName2, "tester", KEY_SCHEMA_STR, VALUE_SCHEMA_V2_STR); TestUtils.waitForNonDeterministicAssertion(15, TimeUnit.SECONDS, () -> { Assert.assertTrue(childAdmin.hasStore(clusterName, storeName)); Assert.assertFalse(childAdmin.getStore(clusterName, storeName).isWriteComputationEnabled()); + Assert.assertTrue(childAdmin.hasStore(clusterName, storeName2)); + Assert.assertFalse(childAdmin.getStore(clusterName, storeName2).isWriteComputationEnabled()); }); // Set flag String childControllerUrl = childController.getControllerUrl(); try (ControllerClient childControllerClient = new ControllerClient(clusterName, childControllerUrl)) { + ControllerResponse response = + childControllerClient.updateStore(storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true)); + Assert.assertTrue(response.isError()); + Assert.assertTrue(response.getError().contains("Write computation is only supported for hybrid stores")); + + ControllerResponse response2 = childControllerClient.updateStore( + storeName, + new UpdateStoreQueryParams().setHybridRewindSeconds(1000) + .setHybridOffsetLagThreshold(1000) + .setWriteComputationEnabled(true)); + Assert.assertTrue(response2.isError()); + Assert.assertTrue(response2.getError().contains("top level field probably missing defaults")); + + TestUtils.waitForNonDeterministicAssertion(15, TimeUnit.SECONDS, () -> { + Assert.assertFalse( + childAdmin.getStore(clusterName, storeName).isWriteComputationEnabled(), + "Write Compute should not be enabled if the value schema is not a Record."); + }); + assertCommand( - childControllerClient.updateStore(storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true)), - "Write Compute should be enabled"); - Assert.assertTrue(childAdmin.getStore(clusterName, storeName).isWriteComputationEnabled()); + childControllerClient.updateStore( + storeName2, + new UpdateStoreQueryParams().setHybridRewindSeconds(1000) + .setHybridOffsetLagThreshold(1000) + .setWriteComputationEnabled(true))); + TestUtils.waitForNonDeterministicAssertion(15, TimeUnit.SECONDS, () -> { + Assert.assertTrue(childAdmin.getStore(clusterName, storeName2).isWriteComputationEnabled()); + }); // Reset flag assertCommand( @@ -70,7 +98,7 @@ public void testFeatureFlagSingleDC() { } } - @Test(timeOut = 90 * Time.MS_PER_SECOND) + @Test(timeOut = 120 * Time.MS_PER_SECOND) public void testFeatureFlagMultipleDC() { try (VeniceTwoLayerMultiRegionMultiClusterWrapper twoLayerMultiRegionMultiClusterWrapper = ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper(1, 1, 1, 1, 1, 0)) { @@ -100,7 +128,7 @@ public void testFeatureFlagMultipleDC() { ControllerResponse response = parentControllerClient .updateStore(storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true)); Assert.assertTrue(response.isError()); - Assert.assertTrue(response.getError().contains("top level field probably missing defaults")); + Assert.assertTrue(response.getError().contains("Write computation is only supported for hybrid stores")); ControllerResponse response2 = parentControllerClient.updateStore( storeName, @@ -113,10 +141,10 @@ public void testFeatureFlagMultipleDC() { TestUtils.waitForNonDeterministicAssertion(15, TimeUnit.SECONDS, () -> { Assert.assertFalse( parentAdmin.getStore(clusterName, storeName).isWriteComputationEnabled(), - "Write Compute should not be enabled before the value schema 
is not a Record."); + "Write Compute should not be enabled if the value schema is not a Record."); Assert.assertFalse( childAdmin.getStore(clusterName, storeName).isWriteComputationEnabled(), - "Write Compute should not be enabled before the value schema is not a Record."); + "Write Compute should not be enabled if the value schema is not a Record."); }); assertCommand( diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/hadoop/TestVenicePushJob.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/hadoop/TestVenicePushJob.java index e8a6a49770..a5245c87cb 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/hadoop/TestVenicePushJob.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/hadoop/TestVenicePushJob.java @@ -5,6 +5,7 @@ import static com.linkedin.venice.utils.IntegrationTestPushUtils.createStoreForJob; import static com.linkedin.venice.utils.IntegrationTestPushUtils.defaultVPJProps; import static com.linkedin.venice.utils.TestWriteUtils.getTempDataDirectory; +import static com.linkedin.venice.utils.TestWriteUtils.loadFileAsString; import static com.linkedin.venice.utils.TestWriteUtils.writeSimpleAvroFileWithStringToStringSchema; import static com.linkedin.venice.utils.TestWriteUtils.writeSimpleAvroFileWithStringToStringSchema2; import static com.linkedin.venice.utils.TestWriteUtils.writeSimpleVsonFileWithUserSchema; @@ -468,11 +469,14 @@ public void testWCBatchJob() throws Exception { String routerUrl = veniceCluster.getRandomRouterURL(); ControllerClient controllerClient = new ControllerClient(veniceCluster.getClusterName(), routerUrl); - UpdateStoreQueryParams params = new UpdateStoreQueryParams(); - params.setWriteComputationEnabled(true); - params.setIncrementalPushEnabled(false); + UpdateStoreQueryParams params = new UpdateStoreQueryParams().setHybridRewindSeconds(Time.SECONDS_PER_DAY) + .setHybridOffsetLagThreshold(1000) + .setWriteComputationEnabled(true) + .setIncrementalPushEnabled(false); - controllerClient.createNewStoreWithParameters(storeName, "owner", "\"string\"", "\"string\"", params); + String valueSchemaStr = loadFileAsString("UserValue.avsc"); + + controllerClient.createNewStoreWithParameters(storeName, "owner", "\"string\"", valueSchemaStr, params); String inputDirPath = "file://" + inputDir.getAbsolutePath(); Properties props = defaultVPJProps(veniceCluster, inputDirPath, storeName); @@ -630,7 +634,7 @@ public void testKIFRepushFetch(boolean chunkingEnabled) throws Exception { new UpdateStoreQueryParams().setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA) .setPartitionCount(2) .setIncrementalPushEnabled(true) - .setWriteComputationEnabled(true))); + .setChunkingEnabled(true))); Properties props = defaultVPJProps(veniceCluster, inputDirPath, storeName); props.setProperty(SEND_CONTROL_MESSAGES_DIRECTLY, "true"); // create a batch version. 
@@ -653,13 +657,27 @@ public void testKIFRepushFetch(boolean chunkingEnabled) throws Exception { new UpdateStoreQueryParams().setHybridOffsetLagThreshold(1) .setHybridRewindSeconds(0) .setChunkingEnabled(chunkingEnabled))); - // Run the repush job, it should still pass - TestWriteUtils.runPushJob("Test push job", props); - try (AvroGenericStoreClient avroClient = ClientFactory.getAndStartGenericAvroClient( - ClientConfig.defaultGenericClientConfig(storeName).setVeniceURL(veniceCluster.getRandomRouterURL()))) { - for (int i = 1; i <= 100; i++) { - Assert.assertEquals(avroClient.get(Integer.toString(i)).get().toString(), "test_name_" + i); + try { + // Run the repush job, it should still pass + TestWriteUtils.runPushJob("Test push job", props); + + if (!chunkingEnabled) { + Assert.fail("Expected an exception since chunking was disabled in store config"); + } + + try (AvroGenericStoreClient avroClient = ClientFactory.getAndStartGenericAvroClient( + ClientConfig.defaultGenericClientConfig(storeName).setVeniceURL(veniceCluster.getRandomRouterURL()))) { + for (int i = 1; i <= 100; i++) { + Assert.assertEquals(avroClient.get(Integer.toString(i)).get().toString(), "test_name_" + i); + } + } + } catch (VeniceException e) { + if (!chunkingEnabled) { + Assert.assertTrue( + e.getMessage().contains("Source version has chunking enabled while chunking is disabled in store config")); + } else { + Assert.fail("Unexpected exception: " + e.getMessage(), e); } } } diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/ingestionHeartbeat/IngestionHeartBeatTest.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/ingestionHeartbeat/IngestionHeartBeatTest.java index 33c105272b..3e7859a2a0 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/ingestionHeartbeat/IngestionHeartBeatTest.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/ingestionHeartbeat/IngestionHeartBeatTest.java @@ -20,6 +20,7 @@ import com.linkedin.venice.client.store.ClientConfig; import com.linkedin.venice.client.store.ClientFactory; import com.linkedin.venice.compression.CompressionStrategy; +import com.linkedin.venice.controller.util.AdminUtils; import com.linkedin.venice.controllerapi.ControllerClient; import com.linkedin.venice.controllerapi.ControllerResponse; import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; @@ -34,7 +35,10 @@ import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; import com.linkedin.venice.kafka.protocol.KafkaMessageEnvelope; import com.linkedin.venice.message.KafkaKey; +import com.linkedin.venice.meta.BufferReplayPolicy; import com.linkedin.venice.meta.DataReplicationPolicy; +import com.linkedin.venice.meta.HybridStoreConfig; +import com.linkedin.venice.meta.HybridStoreConfigImpl; import com.linkedin.venice.meta.Store; import com.linkedin.venice.meta.Version; import com.linkedin.venice.pubsub.PubSubTopicPartitionImpl; @@ -64,8 +68,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.avro.Schema; import org.apache.avro.generic.GenericRecord; +import org.testng.Assert; import org.testng.annotations.AfterClass; -import org.testng.annotations.AfterTest; import org.testng.annotations.BeforeClass; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; @@ -82,7 +86,6 @@ public class IngestionHeartBeatTest { private VeniceTwoLayerMultiRegionMultiClusterWrapper multiRegionMultiClusterWrapper; private 
VeniceControllerWrapper parentController; private List childDatacenters; - private String storeName; @BeforeClass(alwaysRun = true) public void setUp() { @@ -109,17 +112,6 @@ public void setUp() { this.parentController = parentControllers.get(0); } - @AfterTest(alwaysRun = true) - public void cleanupStore() { - if (this.parentController != null) { - String parentControllerUrl = parentController.getControllerUrl(); - try (ControllerClient parentControllerClient = - new ControllerClient(multiRegionMultiClusterWrapper.getClusterNames()[0], parentControllerUrl)) { - parentControllerClient.disableAndDeleteStore(storeName); - } - } - } - @DataProvider public static Object[][] AAConfigAndIncPushAndDRPProvider() { return DataProviderUtils @@ -131,7 +123,7 @@ public void testIngestionHeartBeat( boolean isActiveActiveEnabled, boolean isIncrementalPushEnabled, DataReplicationPolicy dataReplicationPolicy) throws IOException, InterruptedException { - storeName = Utils.getUniqueString("ingestionHeartBeatTest"); + String storeName = Utils.getUniqueString("ingestionHeartBeatTest"); String parentControllerUrl = parentController.getControllerUrl(); File inputDir = getTempDataDirectory(); Schema recordSchema = writeSimpleAvroFileWithStringToNameRecordV1Schema(inputDir); @@ -147,22 +139,47 @@ public void testIngestionHeartBeat( assertCommand( parentControllerClient .createNewStore(storeName, "test_owner", keySchemaStr, NAME_RECORD_V1_SCHEMA.toString())); + + assertCommand( + parentControllerClient.updateStore( + storeName, + new UpdateStoreQueryParams().setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA) + .setCompressionStrategy(CompressionStrategy.NO_OP) + .setHybridRewindSeconds(500L) + .setHybridOffsetLagThreshold(10L) + .setPartitionCount(2) + .setReplicationFactor(2) + .setNativeReplicationEnabled(true))); + UpdateStoreQueryParams updateStoreParams = - new UpdateStoreQueryParams().setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA) - .setCompressionStrategy(CompressionStrategy.NO_OP) - .setIncrementalPushEnabled(isIncrementalPushEnabled) - .setHybridRewindSeconds(500L) - .setHybridOffsetLagThreshold(10L) - .setPartitionCount(2) - .setReplicationFactor(2) - .setNativeReplicationEnabled(true) + new UpdateStoreQueryParams().setIncrementalPushEnabled(isIncrementalPushEnabled) .setActiveActiveReplicationEnabled(isActiveActiveEnabled) .setHybridDataReplicationPolicy(dataReplicationPolicy); ControllerResponse updateStoreResponse = parentControllerClient.retryableRequest(5, c -> c.updateStore(storeName, updateStoreParams)); - assertFalse(updateStoreResponse.isError(), "Update store got error: " + updateStoreResponse.getError()); + HybridStoreConfig expectedHybridStoreConfig = + new HybridStoreConfigImpl(500L, 10L, -1, dataReplicationPolicy, BufferReplayPolicy.REWIND_FROM_EOP); + + boolean isIncrementalPushAllowed = + AdminUtils.isIncrementalPushSupported(true, isActiveActiveEnabled, expectedHybridStoreConfig); + + // ACTIVE_ACTIVE DRP is only supported for stores with AA enabled + boolean isAAConfigSupported = + isActiveActiveEnabled || dataReplicationPolicy != DataReplicationPolicy.ACTIVE_ACTIVE; + + // Push should succeed if: + // 1. It is a full push, or + // 2. 
It is an incremental push, and incremental push is allowed + boolean shouldPushSucceed = !isIncrementalPushEnabled || isIncrementalPushAllowed; + + boolean isConfigSupported = isAAConfigSupported && shouldPushSucceed; + if (isConfigSupported) { + assertCommand(updateStoreResponse); + } else { + assertTrue(updateStoreResponse.isError()); + } VersionCreationResponse response = parentControllerClient.emptyPush(storeName, "test_push_id", 1000); assertEquals(response.getVersion(), 1); @@ -178,26 +195,43 @@ public void testIngestionHeartBeat( String childControllerUrl = childDatacenters.get(0).getRandomController().getControllerUrl(); try (ControllerClient childControllerClient = new ControllerClient(CLUSTER_NAME, childControllerUrl)) { runVPJ(vpjProperties, expectedVersionNumber, childControllerClient); + if (!shouldPushSucceed) { + Assert.fail("Push should have failed"); + } + } catch (Exception e) { + if (shouldPushSucceed) { + Assert.fail("Push should not fail", e); + } } VeniceClusterWrapper veniceClusterWrapper = childDatacenters.get(0).getClusters().get(CLUSTER_NAME); veniceClusterWrapper.waitVersion(storeName, expectedVersionNumber); - // Verify data pushed via full push/inc push using client - try (AvroGenericStoreClient storeReader = ClientFactory.getAndStartGenericAvroClient( - ClientConfig.defaultGenericClientConfig(storeName).setVeniceURL(veniceClusterWrapper.getRandomRouterURL()))) { - TestUtils.waitForNonDeterministicAssertion(10, TimeUnit.SECONDS, true, () -> { - try { - for (int i = 1; i < 100; i++) { - String key = String.valueOf(i); - GenericRecord value = readValue(storeReader, key); - assertNotNull(value, "Key " + key + " should not be missing!"); - assertEquals(value.get("firstName").toString(), "first_name_" + key); - assertEquals(value.get("lastName").toString(), "last_name_" + key); + if (shouldPushSucceed) { + // Verify data pushed via full push/inc push using client + try (AvroGenericStoreClient storeReader = ClientFactory.getAndStartGenericAvroClient( + ClientConfig.defaultGenericClientConfig(storeName) + .setVeniceURL(veniceClusterWrapper.getRandomRouterURL()))) { + TestUtils.waitForNonDeterministicAssertion(10, TimeUnit.SECONDS, true, () -> { + try { + for (int i = 1; i < 100; i++) { + String key = String.valueOf(i); + GenericRecord value = readValue(storeReader, key); + assertNotNull(value, "Key " + key + " should not be missing!"); + assertEquals(value.get("firstName").toString(), "first_name_" + key); + assertEquals(value.get("lastName").toString(), "last_name_" + key); + } + } catch (Exception e) { + throw new VeniceException(e); } - } catch (Exception e) { - throw new VeniceException(e); - } - }); + }); + } + } + + // Since the config combination is not supported, we can either validate the heartbeats using the default values, + // or skip the validation. Here, we choose to skip it since the default validation case will be one of the + // permutations where the heartbeats will get validated. 
+ if (!isConfigSupported) { + return; } // create consumer to consume from RT/VT to verify HB and Leader completed header @@ -280,6 +314,7 @@ private void verifyHBinKafkaTopic( break; } } + if ((!isIncrementalPushEnabled || isActiveActiveEnabled) && (isActiveActiveEnabled || dataReplicationPolicy != DataReplicationPolicy.AGGREGATE)) { assertTrue( diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceControllerCreateOptions.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceControllerCreateOptions.java index 07404fd8a3..1426403684 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceControllerCreateOptions.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceControllerCreateOptions.java @@ -11,6 +11,7 @@ import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_REPLICATION_FACTOR; import com.linkedin.venice.authorization.AuthorizerService; +import com.linkedin.venice.controller.supersetschema.SupersetSchemaGenerator; import java.util.Arrays; import java.util.Map; import java.util.Objects; @@ -38,6 +39,7 @@ public class VeniceControllerCreateOptions { private final Properties extraProperties; private final AuthorizerService authorizerService; private final String regionName; + private final SupersetSchemaGenerator supersetSchemaGenerator; private VeniceControllerCreateOptions(Builder builder) { multiRegion = builder.multiRegion; @@ -59,6 +61,7 @@ private VeniceControllerCreateOptions(Builder builder) { authorizerService = builder.authorizerService; isParent = builder.childControllers != null && builder.childControllers.length != 0; regionName = builder.regionName; + supersetSchemaGenerator = builder.supersetSchemaGenerator; } @Override @@ -116,6 +119,9 @@ public String toString() { .append(", ") .append("childControllers:") .append(getAddressesOfChildControllers()) + .append(", ") + .append("supersetSchemaGenerator:") + .append(supersetSchemaGenerator) .toString(); } @@ -205,6 +211,10 @@ public String getRegionName() { return regionName; } + public SupersetSchemaGenerator getSupersetSchemaGenerator() { + return supersetSchemaGenerator; + } + public static class Builder { private boolean multiRegion = false; private final String[] clusterNames; @@ -224,6 +234,7 @@ public static class Builder { private Properties extraProperties = new Properties(); private AuthorizerService authorizerService; private String regionName; + private SupersetSchemaGenerator supersetSchemaGenerator; public Builder(String[] clusterNames, ZkServerWrapper zkServer, PubSubBrokerWrapper kafkaBroker) { this.clusterNames = Objects.requireNonNull(clusterNames, "clusterNames cannot be null when creating controller"); @@ -315,6 +326,11 @@ public Builder regionName(String regionName) { return this; } + public Builder supersetSchemaGenerator(SupersetSchemaGenerator supersetSchemaGenerator) { + this.supersetSchemaGenerator = supersetSchemaGenerator; + return this; + } + private void verifyAndAddParentControllerSpecificDefaults() { extraProperties.setProperty(LOCAL_REGION_NAME, DEFAULT_PARENT_DATA_CENTER_REGION_NAME); if (!extraProperties.containsKey(CONTROLLER_AUTO_MATERIALIZE_META_SYSTEM_STORE)) { diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceControllerWrapper.java 
b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceControllerWrapper.java index 78e51df38b..95b06a2e8c 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceControllerWrapper.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceControllerWrapper.java @@ -60,7 +60,6 @@ import com.linkedin.venice.controller.VeniceControllerContext; import com.linkedin.venice.controller.VeniceHelixAdmin; import com.linkedin.venice.controller.kafka.consumer.AdminConsumerService; -import com.linkedin.venice.controller.supersetschema.SupersetSchemaGenerator; import com.linkedin.venice.d2.D2Server; import com.linkedin.venice.meta.PersistenceType; import com.linkedin.venice.pubsub.PubSubClientsFactory; @@ -100,8 +99,6 @@ public class VeniceControllerWrapper extends ProcessWrapper { public static final String PARENT_D2_CLUSTER_NAME = "ParentControllerD2Cluster"; public static final String PARENT_D2_SERVICE_NAME = "ParentController"; - public static final String SUPERSET_SCHEMA_GENERATOR = "SupersetSchemaGenerator"; - public static final double DEFAULT_STORAGE_ENGINE_OVERHEAD_RATIO = 0.85d; private VeniceController service; @@ -354,18 +351,13 @@ static StatefulServiceProvider generateService(VeniceCo if (clientConfig instanceof ClientConfig) { consumerClientConfig = Optional.of((ClientConfig) clientConfig); } - Optional supersetSchemaGenerator = Optional.empty(); - Object passedSupersetSchemaGenerator = options.getExtraProperties().get(SUPERSET_SCHEMA_GENERATOR); - if (passedSupersetSchemaGenerator instanceof SupersetSchemaGenerator) { - supersetSchemaGenerator = Optional.of((SupersetSchemaGenerator) passedSupersetSchemaGenerator); - } VeniceControllerContext ctx = new VeniceControllerContext.Builder().setPropertiesList(propertiesList) .setMetricsRepository(metricsRepository) .setServiceDiscoveryAnnouncers(d2ServerList) .setAuthorizerService(options.getAuthorizerService()) .setD2Client(d2Client) .setRouterClientConfig(consumerClientConfig.orElse(null)) - .setExternalSupersetSchemaGenerator(supersetSchemaGenerator.orElse(null)) + .setExternalSupersetSchemaGenerator(options.getSupersetSchemaGenerator()) .build(); VeniceController veniceController = new VeniceController(ctx); return new VeniceControllerWrapper( diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiClusterCreateOptions.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiClusterCreateOptions.java index d02a19a1b4..6af4a5390d 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiClusterCreateOptions.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiClusterCreateOptions.java @@ -10,6 +10,7 @@ import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_SSL_TO_STORAGE_NODES; import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.STANDALONE_REGION_NAME; +import com.linkedin.venice.controller.supersetschema.SupersetSchemaGenerator; import java.util.Collections; import java.util.Map; import java.util.Properties; @@ -37,6 +38,7 @@ public class VeniceMultiClusterCreateOptions { private final PubSubBrokerWrapper pubSubBrokerWrapper; private final Properties childControllerProperties; private final Properties 
extraProperties; + private final SupersetSchemaGenerator supersetSchemaGenerator; public String getRegionName() { return regionName; @@ -122,6 +124,10 @@ public Properties getExtraProperties() { return extraProperties; } + public SupersetSchemaGenerator getSupersetSchemaGenerator() { + return supersetSchemaGenerator; + } + @Override public String toString() { return new StringBuilder().append("VeniceMultiClusterCreateOptions - ") @@ -187,6 +193,9 @@ public String toString() { .append(", ") .append("kafkaClusterMap:") .append(kafkaClusterMap) + .append(", ") + .append("supersetSchemaGenerator:") + .append(supersetSchemaGenerator) .toString(); } @@ -212,6 +221,7 @@ private VeniceMultiClusterCreateOptions(Builder builder) { extraProperties = builder.extraProperties; forkServer = builder.forkServer; kafkaClusterMap = builder.kafkaClusterMap; + supersetSchemaGenerator = builder.supersetSchemaGenerator; } public static class Builder { @@ -236,6 +246,7 @@ public static class Builder { private PubSubBrokerWrapper pubSubBrokerWrapper; private Properties childControllerProperties; private Properties extraProperties; + private SupersetSchemaGenerator supersetSchemaGenerator; public Builder numberOfClusters(int numberOfClusters) { this.numberOfClusters = numberOfClusters; @@ -346,6 +357,11 @@ public Builder extraProperties(Properties extraProperties) { return this; } + public Builder supersetSchemaGenerator(SupersetSchemaGenerator supersetSchemaGenerator) { + this.supersetSchemaGenerator = supersetSchemaGenerator; + return this; + } + private void addDefaults() { if (numberOfClusters == 0) { numberOfClusters = 1; diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiRegionClusterCreateOptions.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiRegionClusterCreateOptions.java index c2dafc8526..a7675fb0ff 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiRegionClusterCreateOptions.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiRegionClusterCreateOptions.java @@ -8,6 +8,7 @@ import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_SSL_TO_STORAGE_NODES; import com.linkedin.venice.authorization.AuthorizerService; +import com.linkedin.venice.controller.supersetschema.SupersetSchemaGenerator; import java.util.Properties; @@ -28,6 +29,7 @@ public class VeniceMultiRegionClusterCreateOptions { private final AuthorizerService parentAuthorizerService; private final String parentVeniceZkBasePath; private final String childVeniceZkBasePath; + private final SupersetSchemaGenerator supersetSchemaGenerator; public int getNumberOfRegions() { return numberOfRegions; @@ -93,6 +95,10 @@ public String getChildVeniceZkBasePath() { return childVeniceZkBasePath; } + public SupersetSchemaGenerator getSupersetSchemaGenerator() { + return supersetSchemaGenerator; + } + @Override public String toString() { return new StringBuilder().append("VeniceMultiClusterCreateOptions - ") @@ -143,6 +149,9 @@ public String toString() { .append(", ") .append("childVeniceZkBasePath:") .append(childVeniceZkBasePath) + .append(", ") + .append("supersetSchemaGenerator:") + .append(supersetSchemaGenerator) .toString(); } @@ -163,6 +172,7 @@ private VeniceMultiRegionClusterCreateOptions(Builder builder) { parentAuthorizerService = builder.parentAuthorizerService; 
parentVeniceZkBasePath = builder.parentVeniceZkBasePath; childVeniceZkBasePath = builder.childVeniceZkBasePath; + supersetSchemaGenerator = builder.supersetSchemaGenerator; } public static class Builder { @@ -182,6 +192,7 @@ public static class Builder { private AuthorizerService parentAuthorizerService; private String parentVeniceZkBasePath = "/"; private String childVeniceZkBasePath = "/"; + private SupersetSchemaGenerator supersetSchemaGenerator; public Builder numberOfRegions(int numberOfRegions) { this.numberOfRegions = numberOfRegions; @@ -271,6 +282,11 @@ public Builder childVeniceZkBasePath(String veniceZkBasePath) { return this; } + public Builder supersetSchemaGenerator(SupersetSchemaGenerator supersetSchemaGenerator) { + this.supersetSchemaGenerator = supersetSchemaGenerator; + return this; + } + private void addDefaults() { if (numberOfRegions == 0) { numberOfRegions = 1; diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceTwoLayerMultiRegionMultiClusterWrapper.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceTwoLayerMultiRegionMultiClusterWrapper.java index 3c9e760c5c..98491d2bc8 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceTwoLayerMultiRegionMultiClusterWrapper.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceTwoLayerMultiRegionMultiClusterWrapper.java @@ -193,7 +193,8 @@ static ServiceProvider generateSer .sslToStorageNodes(options.isSslToStorageNodes()) .sslToKafka(options.isSslToKafka()) .forkServer(options.isForkServer()) - .kafkaClusterMap(kafkaClusterMap); + .kafkaClusterMap(kafkaClusterMap) + .supersetSchemaGenerator(options.getSupersetSchemaGenerator()); // Create multi-clusters for (int i = 0; i < options.getNumberOfRegions(); i++) { String regionName = childRegionName.get(i); @@ -225,6 +226,7 @@ static ServiceProvider generateSer .clusterToServerD2(clusterToServerD2) .regionName(parentRegionName) .authorizerService(options.getParentAuthorizerService()) + .supersetSchemaGenerator(options.getSupersetSchemaGenerator()) .build(); // Create parentControllers for multi-cluster for (int i = 0; i < options.getNumberOfParentControllers(); i++) { diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/utils/IntegrationTestPushUtils.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/utils/IntegrationTestPushUtils.java index 3aa14ac9e5..77c07a7cf5 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/utils/IntegrationTestPushUtils.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/utils/IntegrationTestPushUtils.java @@ -16,6 +16,7 @@ import static com.linkedin.venice.samza.VeniceSystemFactory.VENICE_PARENT_D2_ZK_HOSTS; import static com.linkedin.venice.samza.VeniceSystemFactory.VENICE_PUSH_TYPE; import static com.linkedin.venice.samza.VeniceSystemFactory.VENICE_STORE; +import static com.linkedin.venice.utils.TestUtils.assertCommand; import static com.linkedin.venice.vpj.VenicePushJobConstants.D2_ZK_HOSTS_PREFIX; import static com.linkedin.venice.vpj.VenicePushJobConstants.DEFAULT_KEY_FIELD_PROP; import static com.linkedin.venice.vpj.VenicePushJobConstants.DEFAULT_VALUE_FIELD_PROP; @@ -30,7 +31,6 @@ import com.linkedin.venice.controllerapi.ControllerClient; import com.linkedin.venice.controllerapi.ControllerResponse; import 
com.linkedin.venice.controllerapi.D2ControllerClientFactory; -import com.linkedin.venice.controllerapi.NewStoreResponse; import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; import com.linkedin.venice.endToEnd.DaVinciClientDiskFullTest; import com.linkedin.venice.exceptions.VeniceException; @@ -175,9 +175,7 @@ public static ControllerClient createStoreForJob( keySchemaStr, valueSchema, props, - CompressionStrategy.NO_OP, - false, - false); + CompressionStrategy.NO_OP); } public static void makeStoreHybrid( @@ -274,9 +272,7 @@ public static ControllerClient createStoreForJob(String veniceClusterName, Schem getKeySchemaString(recordSchema, props), getValueSchemaString(recordSchema, props), props, - CompressionStrategy.NO_OP, - false, - false); + CompressionStrategy.NO_OP); } public static ControllerClient createStoreForJob( @@ -284,17 +280,13 @@ public static ControllerClient createStoreForJob( String keySchemaStr, String valueSchemaStr, Properties props, - CompressionStrategy compressionStrategy, - boolean chunkingEnabled, - boolean incrementalPushEnabled) { + CompressionStrategy compressionStrategy) { UpdateStoreQueryParams storeParams = new UpdateStoreQueryParams().setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA) .setCompressionStrategy(compressionStrategy) .setBatchGetLimit(2000) - .setReadQuotaInCU(DEFAULT_PER_ROUTER_READ_QUOTA) - .setChunkingEnabled(chunkingEnabled) - .setIncrementalPushEnabled(incrementalPushEnabled); + .setReadQuotaInCU(DEFAULT_PER_ROUTER_READ_QUOTA); return createStoreForJob(veniceClusterName, keySchemaStr, valueSchemaStr, props, storeParams); } @@ -306,12 +298,12 @@ public static ControllerClient createStoreForJob( Properties props, UpdateStoreQueryParams storeParams) { ControllerClient controllerClient = getControllerClient(veniceClusterName, props); - NewStoreResponse newStoreResponse = controllerClient - .createNewStore(props.getProperty(VENICE_STORE_NAME_PROP), "test@linkedin.com", keySchemaStr, valueSchemaStr); - - if (newStoreResponse.isError()) { - throw new VeniceException("Could not create store " + props.getProperty(VENICE_STORE_NAME_PROP)); - } + assertCommand( + controllerClient.createNewStore( + props.getProperty(VENICE_STORE_NAME_PROP), + "test@linkedin.com", + keySchemaStr, + valueSchemaStr)); updateStore(veniceClusterName, props, storeParams.setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA)); return controllerClient; diff --git a/internal/venice-test-common/src/main/java/com/linkedin/venice/utils/TestUtils.java b/internal/venice-test-common/src/main/java/com/linkedin/venice/utils/TestUtils.java index 6d86e99694..f82a78b7bd 100644 --- a/internal/venice-test-common/src/main/java/com/linkedin/venice/utils/TestUtils.java +++ b/internal/venice-test-common/src/main/java/com/linkedin/venice/utils/TestUtils.java @@ -625,10 +625,10 @@ public static void createAndVerifyStoreInAllRegions( String storeName, ControllerClient parentControllerClient, List controllerClientList) { - Assert.assertFalse(parentControllerClient.createNewStore(storeName, "owner", "\"string\"", "\"string\"").isError()); + assertCommand(parentControllerClient.createNewStore(storeName, "owner", "\"string\"", "\"string\"")); TestUtils.waitForNonDeterministicAssertion(60, TimeUnit.SECONDS, () -> { for (ControllerClient client: controllerClientList) { - Assert.assertFalse(client.getStore(storeName).isError()); + assertCommand(client.getStore(storeName)); } }); } diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/Admin.java 
b/services/venice-controller/src/main/java/com/linkedin/venice/controller/Admin.java index d37ead6d48..5d1426f84e 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/Admin.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/Admin.java @@ -3,6 +3,7 @@ import com.linkedin.venice.acl.AclException; import com.linkedin.venice.common.VeniceSystemStoreType; import com.linkedin.venice.controller.kafka.consumer.AdminConsumerService; +import com.linkedin.venice.controller.supersetschema.SupersetSchemaGenerator; import com.linkedin.venice.controllerapi.NodeReplicasReadinessState; import com.linkedin.venice.controllerapi.RepushInfo; import com.linkedin.venice.controllerapi.StoreComparisonInfo; @@ -14,6 +15,7 @@ import com.linkedin.venice.helix.HelixReadOnlyZKSharedSystemStoreRepository; import com.linkedin.venice.helix.Replica; import com.linkedin.venice.meta.Instance; +import com.linkedin.venice.meta.ReadWriteSchemaRepository; import com.linkedin.venice.meta.RegionPushDetails; import com.linkedin.venice.meta.RoutersClusterConfig; import com.linkedin.venice.meta.Store; @@ -389,7 +391,14 @@ default SchemaEntry addValueSchema(String clusterName, String storeName, String SchemaEntry.DEFAULT_SCHEMA_CREATION_COMPATIBILITY_TYPE); } - SchemaEntry addSupersetSchema( + /** + * Add a new superset schema for the given store with all specified properties. + *
+ * Generate the superset schema from the current schema and the latest superset schema existing in the store, if + * one exists; otherwise, fall back to the latest value schema. + * If the newly generated superset schema is unique, add it to the store and update latestSuperSetValueSchemaId of the store. + */ + void addSupersetSchema( String clusterName, String storeName, String valueSchemaStr, @@ -450,8 +459,6 @@ void validateAndMaybeRetrySystemStoreAutoCreation( void rollbackToBackupVersion(String clusterName, String storeName, String regionFilter); - void setStoreLargestUsedVersion(String clusterName, String storeName, int versionNumber); - void setStoreOwner(String clusterName, String storeName, String owner); void setStorePartitionCount(String clusterName, String storeName, int partitionCount); @@ -765,6 +772,13 @@ void updateRoutersClusterConfig( */ boolean isParent(); + /** + * The "Primary Controller" term is used to refer to whichever controller is the main controller in a Venice set-up. + * In a multi-region deployment, the primary controller is the parent controller. + * In a single-region deployment, the primary controller is the only controller. + */ + boolean isPrimary(); + /** + * Return the state of the region of the parent controller. + * @return {@link ParentControllerRegionState#ACTIVE} which means that the parent controller in the region is serving requests. @@ -1020,4 +1034,21 @@ default void clearInstanceMonitor(String clusterName) { HelixVeniceClusterResources getHelixVeniceClusterResources(String cluster); PubSubTopicRepository getPubSubTopicRepository(); + + default Schema getSupersetOrLatestValueSchema(String clusterName, Store store) { + ReadWriteSchemaRepository schemaRepository = getHelixVeniceClusterResources(clusterName).getSchemaRepository(); + // If already a superset schema exists, try to generate the new superset from that and the input value schema + SchemaEntry existingSchema = schemaRepository.getSupersetOrLatestValueSchema(store.getName()); + return existingSchema == null ? null : existingSchema.getSchema(); + } + + /** + * Return the current superset schema generator for the given cluster. + */ + SupersetSchemaGenerator getSupersetSchemaGenerator(String clusterName); + + /** + * Return the multi-cluster configs for the controller.
+ */ + VeniceControllerMultiClusterConfig getMultiClusterConfigs(); } diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/StoreViewUtils.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/StoreViewUtils.java index 3e363fdd6b..aaf6c462e5 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/StoreViewUtils.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/StoreViewUtils.java @@ -20,28 +20,7 @@ public class StoreViewUtils { private static final VeniceJsonSerializer viewConfigVeniceJsonSerializer = new VeniceJsonSerializer<>(ViewConfig.class); - static Map convertStringMapViewToStoreViewConfigRecordMap( - Map stringMap) throws VeniceException { - Map mergedViewConfigRecords = new HashMap<>(); - if (!stringMap.isEmpty()) { - for (Map.Entry stringViewConfig: stringMap.entrySet()) { - try { - ViewConfig viewConfig = - viewConfigVeniceJsonSerializer.deserialize(stringViewConfig.getValue().getBytes(), ""); - StoreViewConfigRecord newViewConfigRecord = new StoreViewConfigRecord( - viewConfig.getViewClassName(), - CollectionUtils.getStringKeyCharSequenceValueMapFromStringMap(viewConfig.getViewParameters())); - mergedViewConfigRecords.put(stringViewConfig.getKey(), newViewConfigRecord); - } catch (IOException e) { - LOGGER.error("Failed to serialize provided view config: {}", stringViewConfig.getValue()); - throw new VeniceException("Failed to serialize provided view config:" + stringViewConfig.getValue(), e); - } - } - } - return mergedViewConfigRecords; - } - - static Map convertStringMapViewToStoreViewConfigMap(Map stringMap) { + public static Map convertStringMapViewToStoreViewConfigMap(Map stringMap) { Map mergedViewConfigRecords = new HashMap<>(); if (!stringMap.isEmpty()) { for (Map.Entry stringViewConfig: stringMap.entrySet()) { @@ -62,20 +41,20 @@ static Map convertStringMapViewToStoreViewConfigMap(Map return mergedViewConfigRecords; } - static Map convertStringMapViewToViewConfigMap(Map stringMap) { + public static Map convertStringMapViewToViewConfigMap(Map stringMap) { return convertStringMapViewToStoreViewConfigMap(stringMap).entrySet() .stream() .collect(Collectors.toMap(Map.Entry::getKey, e -> new ViewConfigImpl(e.getValue()))); } - static Map convertViewConfigMapToStoreViewRecordMap( + public static Map convertViewConfigMapToStoreViewRecordMap( Map viewConfigMap) { return viewConfigMap.entrySet() .stream() .collect(Collectors.toMap(Map.Entry::getKey, e -> convertViewConfigToStoreViewConfigRecord(e.getValue()))); } - static StoreViewConfigRecord convertViewConfigToStoreViewConfigRecord(ViewConfig viewConfig) { + public static StoreViewConfigRecord convertViewConfigToStoreViewConfigRecord(ViewConfig viewConfig) { return new StoreViewConfigRecord(viewConfig.getViewClassName(), viewConfig.dataModel().getViewParameters()); } } diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceControllerClusterConfig.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceControllerClusterConfig.java index 5b1dfa71d0..f43526ea06 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceControllerClusterConfig.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceControllerClusterConfig.java @@ -47,6 +47,7 @@ import static com.linkedin.venice.ConfigKeys.CONTROLLER_EARLY_DELETE_BACKUP_ENABLED; import static 
com.linkedin.venice.ConfigKeys.CONTROLLER_ENABLE_DISABLED_REPLICA_ENABLED; import static com.linkedin.venice.ConfigKeys.CONTROLLER_ENFORCE_SSL; +import static com.linkedin.venice.ConfigKeys.CONTROLLER_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED; import static com.linkedin.venice.ConfigKeys.CONTROLLER_HAAS_SUPER_CLUSTER_NAME; import static com.linkedin.venice.ConfigKeys.CONTROLLER_HELIX_CLOUD_ID; import static com.linkedin.venice.ConfigKeys.CONTROLLER_HELIX_CLOUD_INFO_PROCESSOR_NAME; @@ -395,7 +396,7 @@ public class VeniceControllerClusterConfig { private final int parentSystemStoreRepairRetryCount; - private final boolean parentExternalSupersetSchemaGenerationEnabled; + private final boolean externalSupersetSchemaGenerationEnabled; private final boolean systemSchemaInitializationAtStartTimeEnabled; @@ -950,8 +951,10 @@ public VeniceControllerClusterConfig(VeniceProperties props) { this.parentSystemStoreRepairRetryCount = props.getInt(CONTROLLER_PARENT_SYSTEM_STORE_REPAIR_RETRY_COUNT, 1); this.clusterDiscoveryD2ServiceName = props.getString(CLUSTER_DISCOVERY_D2_SERVICE, ClientConfig.DEFAULT_CLUSTER_DISCOVERY_D2_SERVICE_NAME); - this.parentExternalSupersetSchemaGenerationEnabled = - props.getBoolean(CONTROLLER_PARENT_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED, false); + this.externalSupersetSchemaGenerationEnabled = props.getBooleanWithAlternative( + CONTROLLER_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED, + CONTROLLER_PARENT_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED, + false); this.systemSchemaInitializationAtStartTimeEnabled = props.getBoolean(SYSTEM_SCHEMA_INITIALIZATION_AT_START_TIME_ENABLED, false); this.isKMERegistrationFromMessageHeaderEnabled = @@ -1637,8 +1640,8 @@ public int getParentSystemStoreRepairRetryCount() { return parentSystemStoreRepairRetryCount; } - public boolean isParentExternalSupersetSchemaGenerationEnabled() { - return parentExternalSupersetSchemaGenerationEnabled; + public boolean isExternalSupersetSchemaGenerationEnabled() { + return externalSupersetSchemaGenerationEnabled; } public boolean isSystemSchemaInitializationAtStartTimeEnabled() { diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceControllerService.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceControllerService.java index 65799d0187..eca375a082 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceControllerService.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceControllerService.java @@ -90,6 +90,7 @@ public VeniceControllerService( sslConfig, accessController, icProvider, + externalSupersetSchemaGenerator, pubSubTopicRepository, pubSubClientsFactory, Arrays.asList(initRoutineForPushJobDetailsSystemStore, initRoutineForHeartbeatSystemStore)); diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceHelixAdmin.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceHelixAdmin.java index 24faf1dedf..37dc1d75c9 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceHelixAdmin.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceHelixAdmin.java @@ -7,9 +7,6 @@ import static com.linkedin.venice.ConfigKeys.SSL_KAFKA_BOOTSTRAP_SERVERS; import static com.linkedin.venice.ConfigKeys.SSL_TO_KAFKA_LEGACY; import static com.linkedin.venice.controller.UserSystemStoreLifeCycleHelper.AUTO_META_SYSTEM_STORE_PUSH_ID_PREFIX; -import 
static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_HYBRID_OFFSET_LAG_THRESHOLD; -import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_HYBRID_TIME_LAG_THRESHOLD; -import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_REWIND_TIME_IN_SECONDS; import static com.linkedin.venice.meta.Store.NON_EXISTING_VERSION; import static com.linkedin.venice.meta.Version.PushType; import static com.linkedin.venice.meta.VersionStatus.ERROR; @@ -54,10 +51,13 @@ import com.linkedin.venice.controller.init.SystemSchemaInitializationRoutine; import com.linkedin.venice.controller.kafka.StoreStatusDecider; import com.linkedin.venice.controller.kafka.consumer.AdminConsumerService; -import com.linkedin.venice.controller.kafka.protocol.admin.HybridStoreConfigRecord; -import com.linkedin.venice.controller.kafka.protocol.admin.StoreViewConfigRecord; import com.linkedin.venice.controller.stats.DisabledPartitionStats; import com.linkedin.venice.controller.stats.PushJobStatusStats; +import com.linkedin.venice.controller.supersetschema.DefaultSupersetSchemaGenerator; +import com.linkedin.venice.controller.supersetschema.SupersetSchemaGenerator; +import com.linkedin.venice.controller.util.PrimaryControllerConfigUpdateUtils; +import com.linkedin.venice.controller.util.UpdateStoreUtils; +import com.linkedin.venice.controller.util.UpdateStoreWrapper; import com.linkedin.venice.controllerapi.ControllerClient; import com.linkedin.venice.controllerapi.ControllerResponse; import com.linkedin.venice.controllerapi.ControllerRoute; @@ -72,7 +72,6 @@ import com.linkedin.venice.controllerapi.UpdateStoragePersonaQueryParams; import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; import com.linkedin.venice.controllerapi.VersionResponse; -import com.linkedin.venice.exceptions.ErrorType; import com.linkedin.venice.exceptions.InvalidVeniceSchemaException; import com.linkedin.venice.exceptions.ResourceStillExistsException; import com.linkedin.venice.exceptions.VeniceException; @@ -106,12 +105,8 @@ import com.linkedin.venice.ingestion.control.RealTimeTopicSwitcher; import com.linkedin.venice.kafka.protocol.enums.ControlMessageType; import com.linkedin.venice.meta.BackupStrategy; -import com.linkedin.venice.meta.BufferReplayPolicy; import com.linkedin.venice.meta.DataReplicationPolicy; -import com.linkedin.venice.meta.ETLStoreConfig; -import com.linkedin.venice.meta.ETLStoreConfigImpl; import com.linkedin.venice.meta.HybridStoreConfig; -import com.linkedin.venice.meta.HybridStoreConfigImpl; import com.linkedin.venice.meta.Instance; import com.linkedin.venice.meta.InstanceStatus; import com.linkedin.venice.meta.LiveClusterConfig; @@ -436,6 +431,10 @@ public class VeniceHelixAdmin implements Admin, StoreCleaner { private Set pushJobUserErrorCheckpoints; + private final Optional externalSupersetSchemaGenerator; + + private final SupersetSchemaGenerator defaultSupersetSchemaGenerator = new DefaultSupersetSchemaGenerator(); + public VeniceHelixAdmin( VeniceControllerMultiClusterConfig multiClusterConfigs, MetricsRepository metricsRepository, @@ -450,6 +449,7 @@ public VeniceHelixAdmin( Optional.empty(), Optional.empty(), Optional.empty(), + Optional.empty(), pubSubTopicRepository, pubSubClientsFactory, Collections.EMPTY_LIST); @@ -464,6 +464,7 @@ public VeniceHelixAdmin( Optional sslConfig, Optional accessController, Optional icProvider, + Optional externalSupersetSchemaGenerator, PubSubTopicRepository pubSubTopicRepository, PubSubClientsFactory pubSubClientsFactory, List additionalInitRoutines) { @@ 
-484,6 +485,7 @@ public VeniceHelixAdmin( this.minNumberOfStoreVersionsToPreserve = multiClusterConfigs.getMinNumberOfStoreVersionsToPreserve(); this.d2Client = d2Client; + this.externalSupersetSchemaGenerator = externalSupersetSchemaGenerator; this.pubSubTopicRepository = pubSubTopicRepository; if (sslEnabled) { @@ -621,8 +623,7 @@ public VeniceHelixAdmin( multiClusterConfigs, this, Optional.of(AvroProtocolDefinition.METADATA_SYSTEM_SCHEMA_STORE_KEY.getCurrentProtocolVersionSchema()), - Optional.of(VeniceSystemStoreUtils.DEFAULT_USER_SYSTEM_STORE_UPDATE_QUERY_PARAMS), - true)); + Optional.of(VeniceSystemStoreUtils.DEFAULT_USER_SYSTEM_STORE_UPDATE_QUERY_PARAMS))); } if (multiClusterConfigs.isZkSharedDaVinciPushStatusSystemSchemaStoreAutoCreationEnabled()) { // Add routine to create zk shared da vinci push status system store @@ -632,8 +633,7 @@ public VeniceHelixAdmin( multiClusterConfigs, this, Optional.of(AvroProtocolDefinition.PUSH_STATUS_SYSTEM_SCHEMA_STORE_KEY.getCurrentProtocolVersionSchema()), - Optional.of(VeniceSystemStoreUtils.DEFAULT_USER_SYSTEM_STORE_UPDATE_QUERY_PARAMS), - true)); + Optional.of(VeniceSystemStoreUtils.DEFAULT_USER_SYSTEM_STORE_UPDATE_QUERY_PARAMS))); } initRoutines.addAll(additionalInitRoutines); @@ -3162,23 +3162,17 @@ private void ensureRealTimeTopicIsReady(String clusterName, PubSubTopic realTime if (store == null) { throwStoreDoesNotExist(clusterName, storeName); } - if (!store.isHybrid() && !store.isWriteComputationEnabled() && !store.isSystemStore()) { + if (!store.isHybrid() && !store.isSystemStore()) { logAndThrow("Store " + storeName + " is not hybrid, refusing to return a realtime topic"); } Version version = store.getVersion(store.getLargestUsedVersionNumber()); - int partitionCount = version != null ? version.getPartitionCount() : 0; + // during transition to version based partition count, some old stores may have partition count on the store // config only. + // Now store-level partition count is set when a store is converted to hybrid + int partitionCount = version != null ? version.getPartitionCount() : store.getPartitionCount(); if (partitionCount == 0) { - // Now store-level partition count is set when a store is converted to hybrid - partitionCount = store.getPartitionCount(); - if (partitionCount == 0) { - if (version == null) { - throw new VeniceException("Store: " + storeName + " is not initialized with a version yet"); - } else { - throw new VeniceException("Store: " + storeName + " has partition count set to 0"); - } - } + throw new VeniceException("Unable to determine partition count for the real-time topic " + realTimeTopic); } VeniceControllerClusterConfig clusterConfig = getHelixVeniceClusterResources(clusterName).getConfig(); @@ -4085,20 +4079,8 @@ Pair waitVersion(String clusterName, String storeName, int versi */ @Override public void setStoreCurrentVersion(String clusterName, String storeName, int versionNumber) { - this.setStoreCurrentVersion(clusterName, storeName, versionNumber, false); - } - - /** - * In most cases, parent region should not update the current version. 
This is only allowed via an update-store call - * where the region filter list only contains one region, which is the region of the parent controller - */ - private void setStoreCurrentVersion( - String clusterName, - String storeName, - int versionNumber, - boolean allowedInParent) { - if (isParent() && !allowedInParent) { - // Parent colo should not update the current version of a store unless explicitly asked to do so + if (isParent()) { + // Parent colo should not update the current version of a store LOGGER.info( "Skipping current version update for store: {} in cluster: {} because it is not allowed in the " + "parent region", @@ -4126,6 +4108,7 @@ private void setStoreCurrentVersion( } return store; }); + LOGGER.info("Set store: {} version to {} in cluster: {}", storeName, versionNumber, clusterName); } @Override @@ -4214,17 +4197,6 @@ public int getBackupVersionNumber(List versions, int currentVersion) { return NON_EXISTING_VERSION; } - /** - * Update the largest used version number of a specified store. - */ - @Override - public void setStoreLargestUsedVersion(String clusterName, String storeName, int versionNumber) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setLargestUsedVersionNumber(versionNumber); - return store; - }); - } - /** * Update the owner of a specified store. */ @@ -4242,63 +4214,13 @@ public void setStoreOwner(String clusterName, String storeName, String owner) { */ @Override public void setStorePartitionCount(String clusterName, String storeName, int partitionCount) { - VeniceControllerClusterConfig clusterConfig = getHelixVeniceClusterResources(clusterName).getConfig(); storeMetadataUpdate(clusterName, storeName, store -> { - preCheckStorePartitionCountUpdate(clusterName, store, partitionCount); - // Do not update the partitionCount on the store.version as version config is immutable. The - // version.getPartitionCount() - // is read only in getRealTimeTopic and createInternalStore creation, so modifying currentVersion should not have - // any effect. - if (partitionCount != 0) { - store.setPartitionCount(partitionCount); - } else { - store.setPartitionCount(clusterConfig.getMinNumberOfPartitions()); - } - + UpdateStoreUtils.validateStorePartitionCountUpdate(this, multiClusterConfigs, clusterName, store, partitionCount); + store.setPartitionCount(partitionCount); return store; }); } - void preCheckStorePartitionCountUpdate(String clusterName, Store store, int newPartitionCount) { - String errorMessagePrefix = "Store update error for " + store.getName() + " in cluster: " + clusterName + ": "; - VeniceControllerClusterConfig clusterConfig = getHelixVeniceClusterResources(clusterName).getConfig(); - if (store.isHybrid() && store.getPartitionCount() != newPartitionCount) { - // Allow the update if partition count is not configured and the new partition count matches RT partition count - if (store.getPartitionCount() == 0) { - TopicManager topicManager; - if (isParent()) { - // RT might not exist in parent colo. Get RT partition count from a child colo. 
- String childDatacenter = clusterConfig.getChildDatacenters().iterator().next(); - topicManager = getTopicManager(multiClusterConfigs.getChildDataCenterKafkaUrlMap().get(childDatacenter)); - } else { - topicManager = getTopicManager(); - } - PubSubTopic realTimeTopic = pubSubTopicRepository.getTopic(Version.composeRealTimeTopic(store.getName())); - if (topicManager.containsTopic(realTimeTopic) - && topicManager.getPartitionCount(realTimeTopic) == newPartitionCount) { - LOGGER.info("Allow updating store " + store.getName() + " partition count to " + newPartitionCount); - return; - } - } - String errorMessage = errorMessagePrefix + "Cannot change partition count for this hybrid store"; - LOGGER.error(errorMessage); - throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); - } - - int maxPartitionNum = clusterConfig.getMaxNumberOfPartitions(); - if (newPartitionCount > maxPartitionNum) { - String errorMessage = - errorMessagePrefix + "Partition count: " + newPartitionCount + " should be less than max: " + maxPartitionNum; - LOGGER.error(errorMessage); - throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); - } - if (newPartitionCount < 0) { - String errorMessage = errorMessagePrefix + "Partition count: " + newPartitionCount + " should NOT be negative"; - LOGGER.error(errorMessage); - throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); - } - } - void setStorePartitionerConfig(String clusterName, String storeName, PartitionerConfig partitionerConfig) { storeMetadataUpdate(clusterName, storeName, store -> { // Only amplification factor is allowed to be changed if the store is a hybrid store. @@ -4352,32 +4274,6 @@ public void setStoreReadWriteability(String clusterName, String storeName, boole }); } - /** - * We will not expose this interface to Spark server. Updating quota can only be done by #updateStore - * TODO: remove all store attribute setters. 
- */ - private void setStoreStorageQuota(String clusterName, String storeName, long storageQuotaInByte) { - storeMetadataUpdate(clusterName, storeName, store -> { - if (storageQuotaInByte < 0 && storageQuotaInByte != Store.UNLIMITED_STORAGE_QUOTA) { - throw new VeniceException("storage quota can not be less than 0"); - } - store.setStorageQuotaInByte(storageQuotaInByte); - - return store; - }); - } - - private void setStoreReadQuota(String clusterName, String storeName, long readQuotaInCU) { - storeMetadataUpdate(clusterName, storeName, store -> { - if (readQuotaInCU < 0) { - throw new VeniceException("read quota can not be less than 0"); - } - store.setReadQuotaInCU(readQuotaInCU); - - return store; - }); - } - void setAccessControl(String clusterName, String storeName, boolean accessControlled) { storeMetadataUpdate(clusterName, storeName, store -> { store.setAccessControlled(accessControlled); @@ -4445,194 +4341,6 @@ public void deleteValueSchemas(String clusterName, String storeName, Set { - store.setCompressionStrategy(compressionStrategy); - - return store; - }); - } - - private void setClientDecompressionEnabled(String clusterName, String storeName, boolean clientDecompressionEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setClientDecompressionEnabled(clientDecompressionEnabled); - return store; - }); - } - - private void setChunkingEnabled(String clusterName, String storeName, boolean chunkingEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setChunkingEnabled(chunkingEnabled); - return store; - }); - } - - private void setRmdChunkingEnabled(String clusterName, String storeName, boolean rmdChunkingEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setRmdChunkingEnabled(rmdChunkingEnabled); - return store; - }); - } - - void setIncrementalPushEnabled(String clusterName, String storeName, boolean incrementalPushEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - VeniceControllerClusterConfig config = getHelixVeniceClusterResources(clusterName).getConfig(); - if (incrementalPushEnabled || store.isHybrid()) { - // Enabling incremental push - store.setNativeReplicationSourceFabric(config.getNativeReplicationSourceFabricAsDefaultForHybrid()); - store.setActiveActiveReplicationEnabled( - store.isActiveActiveReplicationEnabled() - || (config.isActiveActiveReplicationEnabledAsDefaultForHybrid() && !store.isSystemStore())); - } else { - // Disabling incremental push - // This is only possible when hybrid settings are set to null before turning of incremental push for the store. 
- store.setNativeReplicationSourceFabric(config.getNativeReplicationSourceFabricAsDefaultForBatchOnly()); - store.setActiveActiveReplicationEnabled(false); - } - store.setIncrementalPushEnabled(incrementalPushEnabled); - - return store; - }); - } - - void setSeparateRealTimeTopicEnabled(String clusterName, String storeName, boolean separateRealTimeTopicEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setSeparateRealTimeTopicEnabled(separateRealTimeTopicEnabled); - return store; - }); - } - - private void setReplicationFactor(String clusterName, String storeName, int replicaFactor) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setReplicationFactor(replicaFactor); - - return store; - }); - } - - private void setBatchGetLimit(String clusterName, String storeName, int batchGetLimit) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setBatchGetLimit(batchGetLimit); - - return store; - }); - } - - private void setNumVersionsToPreserve(String clusterName, String storeName, int numVersionsToPreserve) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setNumVersionsToPreserve(numVersionsToPreserve); - - return store; - }); - } - - private void setStoreMigration(String clusterName, String storeName, boolean migrating) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setMigrating(migrating); - return store; - }); - } - - private void setMigrationDuplicateStore(String clusterName, String storeName, boolean migrationDuplicateStore) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setMigrationDuplicateStore(migrationDuplicateStore); - return store; - }); - } - - private void setWriteComputationEnabled(String clusterName, String storeName, boolean writeComputationEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setWriteComputationEnabled(writeComputationEnabled); - return store; - }); - } - - void setReplicationMetadataVersionID(String clusterName, String storeName, int rmdVersion) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setRmdVersion(rmdVersion); - return store; - }); - } - - private void setReadComputationEnabled(String clusterName, String storeName, boolean computationEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setReadComputationEnabled(computationEnabled); - return store; - }); - } - - void setBootstrapToOnlineTimeoutInHours(String clusterName, String storeName, int bootstrapToOnlineTimeoutInHours) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setBootstrapToOnlineTimeoutInHours(bootstrapToOnlineTimeoutInHours); - return store; - }); - } - - private void setNativeReplicationEnabled(String clusterName, String storeName, boolean nativeReplicationEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setNativeReplicationEnabled(nativeReplicationEnabled); - return store; - }); - } - - private void setPushStreamSourceAddress(String clusterName, String storeName, String pushStreamSourceAddress) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setPushStreamSourceAddress(pushStreamSourceAddress); - return store; - }); - } - - private void addStoreViews(String clusterName, String storeName, Map viewConfigMap) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setViewConfigs(StoreViewUtils.convertStringMapViewToViewConfigMap(viewConfigMap)); - return store; - }); - } - - private void setBackupStrategy(String clusterName, String 
storeName, BackupStrategy backupStrategy) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setBackupStrategy(backupStrategy); - return store; - }); - } - - private void setAutoSchemaRegisterPushJobEnabled( - String clusterName, - String storeName, - boolean autoSchemaRegisterPushJobEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setSchemaAutoRegisterFromPushJobEnabled(autoSchemaRegisterPushJobEnabled); - return store; - }); - } - - void setHybridStoreDiskQuotaEnabled(String clusterName, String storeName, boolean hybridStoreDiskQuotaEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setHybridStoreDiskQuotaEnabled(hybridStoreDiskQuotaEnabled); - return store; - }); - } - - private void setBackupVersionRetentionMs(String clusterName, String storeName, long backupVersionRetentionMs) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setBackupVersionRetentionMs(backupVersionRetentionMs); - return store; - }); - } - - private void setNativeReplicationSourceFabric( - String clusterName, - String storeName, - String nativeReplicationSourceFabric) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setNativeReplicationSourceFabric(nativeReplicationSourceFabric); - return store; - }); - } - void setActiveActiveReplicationEnabled(String clusterName, String storeName, boolean activeActiveReplicationEnabled) { storeMetadataUpdate(clusterName, storeName, store -> { store.setActiveActiveReplicationEnabled(activeActiveReplicationEnabled); @@ -4640,40 +4348,6 @@ void setActiveActiveReplicationEnabled(String clusterName, String storeName, boo }); } - private void disableMetaSystemStore(String clusterName, String storeName) { - LOGGER.info("Disabling meta system store for store: {} of cluster: {}", storeName, clusterName); - storeMetadataUpdate(clusterName, storeName, store -> { - store.setStoreMetaSystemStoreEnabled(false); - store.setStoreMetadataSystemStoreEnabled(false); - return store; - }); - } - - private void disableDavinciPushStatusStore(String clusterName, String storeName) { - LOGGER.info("Disabling davinci push status store for store: {} of cluster: {}", storeName, clusterName); - storeMetadataUpdate(clusterName, storeName, store -> { - store.setDaVinciPushStatusStoreEnabled(false); - return store; - }); - } - - private void setLatestSupersetSchemaId(String clusterName, String storeName, int latestSupersetSchemaId) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setLatestSuperSetValueSchemaId(latestSupersetSchemaId); - return store; - }); - } - - private void setStorageNodeReadQuotaEnabled( - String clusterName, - String storeName, - boolean storageNodeReadQuotaEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setStorageNodeReadQuotaEnabled(storageNodeReadQuotaEnabled); - return store; - }); - } - /** * TODO: some logics are in parent controller {@link VeniceParentHelixAdmin} #updateStore and * some are in the child controller here. Need to unify them in the future. 
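Reviewer note (not part of the patch): the per-field setters deleted above are replaced by the consolidated validation path in UpdateStoreUtils, whose body is not shown in this hunk. The sketch below only approximates what validateStorePartitionCountUpdate is expected to enforce, based on the deleted preCheckStorePartitionCountUpdate; the rtTopicExists/rtPartitionCount parameters are hypothetical stand-ins for the TopicManager lookup, and the same imports as VeniceHelixAdmin (Store, VeniceHttpException, HttpStatus, ErrorType) are assumed.

// Illustrative sketch only -- not the actual UpdateStoreUtils implementation.
static void validateStorePartitionCountUpdateSketch(Store store, int newCount, int maxPartitions,
    boolean rtTopicExists, int rtPartitionCount) {
  if (store.isHybrid() && store.getPartitionCount() != newCount) {
    // Changing the partition count of a hybrid store is only tolerated when the store-level count was
    // never set and the requested count already matches the existing real-time topic.
    if (store.getPartitionCount() == 0 && rtTopicExists && rtPartitionCount == newCount) {
      return;
    }
    throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST,
        "Cannot change partition count for this hybrid store", ErrorType.INVALID_CONFIG);
  }
  if (newCount > maxPartitions) {
    throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST,
        "Partition count: " + newCount + " should be less than max: " + maxPartitions, ErrorType.INVALID_CONFIG);
  }
  if (newCount < 0) {
    throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST,
        "Partition count: " + newCount + " should NOT be negative", ErrorType.INVALID_CONFIG);
  }
}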
@@ -4687,6 +4361,15 @@ public void updateStore(String clusterName, String storeName, UpdateStoreQueryPa } } + @Override + public SupersetSchemaGenerator getSupersetSchemaGenerator(String clusterName) { + if (externalSupersetSchemaGenerator.isPresent() + && getMultiClusterConfigs().getControllerConfig(clusterName).isExternalSupersetSchemaGenerationEnabled()) { + return externalSupersetSchemaGenerator.get(); + } + return defaultSupersetSchemaGenerator; + } + /** * Update the {@linkplain LiveClusterConfig} at runtime for a specified cluster. * @param clusterName name of the Venice cluster. @@ -4722,392 +4405,55 @@ public void updateClusterConfig(String clusterName, UpdateClusterConfigQueryPara } private void internalUpdateStore(String clusterName, String storeName, UpdateStoreQueryParams params) { - // There are certain configs that are only allowed to be updated in child regions. We might still want the ability - // to update such configs in the parent region via the Admin tool for operational reasons. So, we allow such updates - // if the regions filter only specifies one region, which is the parent region. - boolean onlyParentRegionFilter = false; - - // Check whether the command affects this region. - if (params.getRegionsFilter().isPresent()) { - Set regionsFilter = parseRegionsFilterList(params.getRegionsFilter().get()); - if (!regionsFilter.contains(multiClusterConfigs.getRegionName())) { - LOGGER.info( - "UpdateStore command will be skipped for store: {} in cluster: {}, because the region filter is {}" - + " which doesn't include the current region: {}", - storeName, - clusterName, - regionsFilter, - multiClusterConfigs.getRegionName()); - return; - } - - if (isParent() && regionsFilter.size() == 1) { - onlyParentRegionFilter = true; - } + UpdateStoreWrapper updatedStoreWrapper = + UpdateStoreUtils.getStoreUpdate(this, clusterName, storeName, params, true); + if (updatedStoreWrapper == null) { + return; } - Store originalStore = getStore(clusterName, storeName); - if (originalStore == null) { - throw new VeniceNoStoreException(storeName, clusterName); - } - if (originalStore.isHybrid()) { - // If this is a hybrid store, always try to disable compaction if RT topic exists. 
- try { - PubSubTopic rtTopic = pubSubTopicRepository.getTopic(Version.composeRealTimeTopic(storeName)); - getTopicManager().updateTopicCompactionPolicy(rtTopic, false); - } catch (PubSubTopicDoesNotExistException e) { - LOGGER.error("Could not find realtime topic for hybrid store {}", storeName); - } - } - - Optional owner = params.getOwner(); - Optional readability = params.getEnableReads(); - Optional writeability = params.getEnableWrites(); - Optional partitionCount = params.getPartitionCount(); - Optional partitionerClass = params.getPartitionerClass(); - Optional> partitionerParams = params.getPartitionerParams(); - Optional amplificationFactor = params.getAmplificationFactor(); - Optional storageQuotaInByte = params.getStorageQuotaInByte(); - Optional readQuotaInCU = params.getReadQuotaInCU(); - Optional currentVersion = params.getCurrentVersion(); - Optional largestUsedVersionNumber = params.getLargestUsedVersionNumber(); - Optional hybridRewindSeconds = params.getHybridRewindSeconds(); - Optional hybridOffsetLagThreshold = params.getHybridOffsetLagThreshold(); - Optional hybridTimeLagThreshold = params.getHybridTimeLagThreshold(); - Optional hybridDataReplicationPolicy = params.getHybridDataReplicationPolicy(); - Optional hybridBufferReplayPolicy = params.getHybridBufferReplayPolicy(); - Optional accessControlled = params.getAccessControlled(); - Optional compressionStrategy = params.getCompressionStrategy(); - Optional clientDecompressionEnabled = params.getClientDecompressionEnabled(); - Optional chunkingEnabled = params.getChunkingEnabled(); - Optional rmdChunkingEnabled = params.getRmdChunkingEnabled(); - Optional batchGetLimit = params.getBatchGetLimit(); - Optional numVersionsToPreserve = params.getNumVersionsToPreserve(); - Optional incrementalPushEnabled = params.getIncrementalPushEnabled(); - Optional separateRealTimeTopicEnabled = params.getSeparateRealTimeTopicEnabled(); - Optional storeMigration = params.getStoreMigration(); - Optional writeComputationEnabled = params.getWriteComputationEnabled(); - Optional replicationMetadataVersionID = params.getReplicationMetadataVersionID(); - Optional readComputationEnabled = params.getReadComputationEnabled(); - Optional bootstrapToOnlineTimeoutInHours = params.getBootstrapToOnlineTimeoutInHours(); - Optional backupStrategy = params.getBackupStrategy(); - Optional autoSchemaRegisterPushJobEnabled = params.getAutoSchemaRegisterPushJobEnabled(); - Optional hybridStoreDiskQuotaEnabled = params.getHybridStoreDiskQuotaEnabled(); - Optional regularVersionETLEnabled = params.getRegularVersionETLEnabled(); - Optional futureVersionETLEnabled = params.getFutureVersionETLEnabled(); - Optional etledUserProxyAccount = params.getETLedProxyUserAccount(); - Optional nativeReplicationEnabled = params.getNativeReplicationEnabled(); - Optional pushStreamSourceAddress = params.getPushStreamSourceAddress(); - Optional backupVersionRetentionMs = params.getBackupVersionRetentionMs(); - Optional replicationFactor = params.getReplicationFactor(); - Optional migrationDuplicateStore = params.getMigrationDuplicateStore(); - Optional nativeReplicationSourceFabric = params.getNativeReplicationSourceFabric(); - Optional activeActiveReplicationEnabled = params.getActiveActiveReplicationEnabled(); - Optional personaName = params.getStoragePersona(); - Optional> storeViews = params.getStoreViews(); - Optional latestSupersetSchemaId = params.getLatestSupersetSchemaId(); - Optional storageNodeReadQuotaEnabled = params.getStorageNodeReadQuotaEnabled(); - Optional 
minCompactionLagSeconds = params.getMinCompactionLagSeconds(); - Optional maxCompactionLagSeconds = params.getMaxCompactionLagSeconds(); - Optional maxRecordSizeBytes = params.getMaxRecordSizeBytes(); - Optional maxNearlineRecordSizeBytes = params.getMaxNearlineRecordSizeBytes(); - Optional unusedSchemaDeletionEnabled = params.getUnusedSchemaDeletionEnabled(); - Optional blobTransferEnabled = params.getBlobTransferEnabled(); - Optional nearlineProducerCompressionEnabled = params.getNearlineProducerCompressionEnabled(); - Optional nearlineProducerCountPerWriter = params.getNearlineProducerCountPerWriter(); - - final Optional newHybridStoreConfig; - if (hybridRewindSeconds.isPresent() || hybridOffsetLagThreshold.isPresent() || hybridTimeLagThreshold.isPresent() - || hybridDataReplicationPolicy.isPresent() || hybridBufferReplayPolicy.isPresent()) { - HybridStoreConfig hybridConfig = mergeNewSettingsIntoOldHybridStoreConfig( - originalStore, - hybridRewindSeconds, - hybridOffsetLagThreshold, - hybridTimeLagThreshold, - hybridDataReplicationPolicy, - hybridBufferReplayPolicy); - newHybridStoreConfig = Optional.ofNullable(hybridConfig); - } else { - newHybridStoreConfig = Optional.empty(); + Store originalStore = updatedStoreWrapper.originalStore; + Store updatedStore = updatedStoreWrapper.updatedStore; + + if (updatedStore == originalStore) { + return; } try { - if (owner.isPresent()) { - setStoreOwner(clusterName, storeName, owner.get()); - } - - if (readability.isPresent()) { - setStoreReadability(clusterName, storeName, readability.get()); - } - - if (writeability.isPresent()) { - setStoreWriteability(clusterName, storeName, writeability.get()); - } - - if (partitionCount.isPresent()) { - setStorePartitionCount(clusterName, storeName, partitionCount.get()); - } - - /** - * If either of these three fields is not present, we should use store's original value to construct correct - * updated partitioner config. - */ - if (partitionerClass.isPresent() || partitionerParams.isPresent() || amplificationFactor.isPresent()) { - PartitionerConfig updatedPartitionerConfig = mergeNewSettingsIntoOldPartitionerConfig( - originalStore, - partitionerClass, - partitionerParams, - amplificationFactor); - setStorePartitionerConfig(clusterName, storeName, updatedPartitionerConfig); - } - - if (storageQuotaInByte.isPresent()) { - setStoreStorageQuota(clusterName, storeName, storageQuotaInByte.get()); - } - - if (readQuotaInCU.isPresent()) { - HelixVeniceClusterResources resources = getHelixVeniceClusterResources(clusterName); - ZkRoutersClusterManager routersClusterManager = resources.getRoutersClusterManager(); - int routerCount = routersClusterManager.getLiveRoutersCount(); - VeniceControllerClusterConfig clusterConfig = getHelixVeniceClusterResources(clusterName).getConfig(); - int defaultReadQuotaPerRouter = clusterConfig.getDefaultReadQuotaPerRouter(); - - if (Math.max(defaultReadQuotaPerRouter, routerCount * defaultReadQuotaPerRouter) < readQuotaInCU.get()) { - throw new VeniceException( - "Cannot update read quota for store " + storeName + " in cluster " + clusterName + ". Read quota " - + readQuotaInCU.get() + " requested is more than the cluster quota."); + if (originalStore.isHybrid()) { + // If this is a hybrid store, always try to disable compaction if RT topic exists. 
+ try { + PubSubTopic rtTopic = pubSubTopicRepository.getTopic(Version.composeRealTimeTopic(storeName)); + getTopicManager().updateTopicCompactionPolicy(rtTopic, false); + } catch (PubSubTopicDoesNotExistException e) { + LOGGER.error("Could not find realtime topic for hybrid store {}", storeName); } - setStoreReadQuota(clusterName, storeName, readQuotaInCU.get()); - } - - if (currentVersion.isPresent()) { - setStoreCurrentVersion(clusterName, storeName, currentVersion.get(), onlyParentRegionFilter); - } - - if (largestUsedVersionNumber.isPresent()) { - setStoreLargestUsedVersion(clusterName, storeName, largestUsedVersionNumber.get()); - } - - if (bootstrapToOnlineTimeoutInHours.isPresent()) { - setBootstrapToOnlineTimeoutInHours(clusterName, storeName, bootstrapToOnlineTimeoutInHours.get()); - } - - VeniceControllerClusterConfig clusterConfig = getHelixVeniceClusterResources(clusterName).getConfig(); - if (newHybridStoreConfig.isPresent()) { - // To fix the final variable problem in the lambda expression - final HybridStoreConfig finalHybridConfig = newHybridStoreConfig.get(); - storeMetadataUpdate(clusterName, storeName, store -> { - if (!isHybrid(finalHybridConfig)) { - /** - * If all the hybrid config values are negative, it indicates that the store is being set back to batch-only store. - * We cannot remove the RT topic immediately because with NR and AA, existing current version is - * still consuming the RT topic. - */ - store.setHybridStoreConfig(null); - store.setIncrementalPushEnabled(false); - // Enable/disable native replication for batch-only stores if the cluster level config for new batch - // stores is on - store.setNativeReplicationSourceFabric( - clusterConfig.getNativeReplicationSourceFabricAsDefaultForBatchOnly()); - store.setActiveActiveReplicationEnabled(false); - } else { - // Batch-only store is being converted to hybrid store. 
- if (!store.isHybrid()) { - /* - * Enable/disable native replication for hybrid stores if the cluster level config - * for new hybrid stores is on - */ - store - .setNativeReplicationSourceFabric(clusterConfig.getNativeReplicationSourceFabricAsDefaultForHybrid()); - /* - * Enable/disable active-active replication for user hybrid stores if the cluster level config - * for new hybrid stores is on - */ - store.setActiveActiveReplicationEnabled( - store.isActiveActiveReplicationEnabled() - || (clusterConfig.isActiveActiveReplicationEnabledAsDefaultForHybrid() - && !store.isSystemStore())); - } - store.setHybridStoreConfig(finalHybridConfig); - PubSubTopic rtTopic = pubSubTopicRepository.getTopic(Version.composeRealTimeTopic(storeName)); - if (getTopicManager().containsTopicAndAllPartitionsAreOnline(rtTopic)) { - // RT already exists, ensure the retention is correct - getTopicManager() - .updateTopicRetention(rtTopic, StoreUtils.getExpectedRetentionTimeInMs(store, finalHybridConfig)); - } - } - return store; - }); - } - - if (accessControlled.isPresent()) { - setAccessControl(clusterName, storeName, accessControlled.get()); } - if (compressionStrategy.isPresent()) { - setStoreCompressionStrategy(clusterName, storeName, compressionStrategy.get()); - } - - if (clientDecompressionEnabled.isPresent()) { - setClientDecompressionEnabled(clusterName, storeName, clientDecompressionEnabled.get()); - } - - if (chunkingEnabled.isPresent()) { - setChunkingEnabled(clusterName, storeName, chunkingEnabled.get()); - } - - if (rmdChunkingEnabled.isPresent()) { - setRmdChunkingEnabled(clusterName, storeName, rmdChunkingEnabled.get()); - } - - if (batchGetLimit.isPresent()) { - setBatchGetLimit(clusterName, storeName, batchGetLimit.get()); - } - - if (numVersionsToPreserve.isPresent()) { - setNumVersionsToPreserve(clusterName, storeName, numVersionsToPreserve.get()); - } - - if (incrementalPushEnabled.isPresent()) { - if (incrementalPushEnabled.get()) { - enableHybridModeOrUpdateSettings(clusterName, storeName); + if (updatedStore.isHybrid()) { + PubSubTopic rtTopic = pubSubTopicRepository.getTopic(Version.composeRealTimeTopic(storeName)); + if (getTopicManager().containsTopicAndAllPartitionsAreOnline(rtTopic)) { + // RT already exists, ensure the retention is correct + getTopicManager().updateTopicRetention( + rtTopic, + StoreUtils.getExpectedRetentionTimeInMs(updatedStore, updatedStore.getHybridStoreConfig())); } - setIncrementalPushEnabled(clusterName, storeName, incrementalPushEnabled.get()); } - if (separateRealTimeTopicEnabled.isPresent()) { - setSeparateRealTimeTopicEnabled(clusterName, storeName, separateRealTimeTopicEnabled.get()); - } - - if (replicationFactor.isPresent()) { - setReplicationFactor(clusterName, storeName, replicationFactor.get()); - } - - if (storeMigration.isPresent()) { - setStoreMigration(clusterName, storeName, storeMigration.get()); - } - - if (migrationDuplicateStore.isPresent()) { - setMigrationDuplicateStore(clusterName, storeName, migrationDuplicateStore.get()); - } - - if (writeComputationEnabled.isPresent()) { - setWriteComputationEnabled(clusterName, storeName, writeComputationEnabled.get()); - } - - if (replicationMetadataVersionID.isPresent()) { - setReplicationMetadataVersionID(clusterName, storeName, replicationMetadataVersionID.get()); - } - - if (readComputationEnabled.isPresent()) { - setReadComputationEnabled(clusterName, storeName, readComputationEnabled.get()); - } - - if (nativeReplicationEnabled.isPresent()) { - setNativeReplicationEnabled(clusterName, storeName, 
nativeReplicationEnabled.get()); - } - - if (activeActiveReplicationEnabled.isPresent()) { - setActiveActiveReplicationEnabled(clusterName, storeName, activeActiveReplicationEnabled.get()); - } - - if (pushStreamSourceAddress.isPresent()) { - setPushStreamSourceAddress(clusterName, storeName, pushStreamSourceAddress.get()); - } - - if (backupStrategy.isPresent()) { - setBackupStrategy(clusterName, storeName, backupStrategy.get()); - } - - autoSchemaRegisterPushJobEnabled - .ifPresent(value -> setAutoSchemaRegisterPushJobEnabled(clusterName, storeName, value)); - hybridStoreDiskQuotaEnabled.ifPresent(value -> setHybridStoreDiskQuotaEnabled(clusterName, storeName, value)); - if (regularVersionETLEnabled.isPresent() || futureVersionETLEnabled.isPresent() - || etledUserProxyAccount.isPresent()) { - ETLStoreConfig etlStoreConfig = new ETLStoreConfigImpl( - etledUserProxyAccount.orElse(originalStore.getEtlStoreConfig().getEtledUserProxyAccount()), - regularVersionETLEnabled.orElse(originalStore.getEtlStoreConfig().isRegularVersionETLEnabled()), - futureVersionETLEnabled.orElse(originalStore.getEtlStoreConfig().isFutureVersionETLEnabled())); - storeMetadataUpdate(clusterName, storeName, store -> { - store.setEtlStoreConfig(etlStoreConfig); - return store; - }); - } - if (backupVersionRetentionMs.isPresent()) { - setBackupVersionRetentionMs(clusterName, storeName, backupVersionRetentionMs.get()); - } - - if (nativeReplicationSourceFabric.isPresent()) { - setNativeReplicationSourceFabric(clusterName, storeName, nativeReplicationSourceFabric.get()); - } - - if (params.disableMetaStore().isPresent() && params.disableMetaStore().get()) { - disableMetaSystemStore(clusterName, storeName); - } - - if (params.disableDavinciPushStatusStore().isPresent() && params.disableDavinciPushStatusStore().get()) { - disableDavinciPushStatusStore(clusterName, storeName); - } + // All validations are done. We are ready to perform the persist the update on Zk + storeMetadataUpdate(clusterName, storeName, store -> updatedStore); + Optional personaName = params.getStoragePersona(); if (personaName.isPresent()) { StoragePersonaRepository repository = getHelixVeniceClusterResources(clusterName).getStoragePersonaRepository(); - repository.addStoresToPersona(personaName.get(), Arrays.asList(storeName)); + repository.addStoresToPersona(personaName.get(), Collections.singletonList(storeName)); } - if (storeViews.isPresent()) { - addStoreViews(clusterName, storeName, storeViews.get()); + // Since we expect the parent controller to emit the actions to the Admin channel where necessary, we need to run + // it within the context of VeniceParentHelixAdmin. So, here, we only run it for the child controller if it is + // running in a single-region mode. 
+ if (!isParent() && isPrimary()) { + UpdateStoreUtils.handlePostUpdateActions(this, clusterName, storeName); } - - if (latestSupersetSchemaId.isPresent()) { - setLatestSupersetSchemaId(clusterName, storeName, latestSupersetSchemaId.get()); - } - - if (minCompactionLagSeconds.isPresent()) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setMinCompactionLagSeconds(minCompactionLagSeconds.get()); - return store; - }); - } - if (maxCompactionLagSeconds.isPresent()) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setMaxCompactionLagSeconds(maxCompactionLagSeconds.get()); - return store; - }); - } - - maxRecordSizeBytes.ifPresent(aInt -> storeMetadataUpdate(clusterName, storeName, store -> { - store.setMaxRecordSizeBytes(aInt); - return store; - })); - - maxNearlineRecordSizeBytes.ifPresent(aInt -> storeMetadataUpdate(clusterName, storeName, store -> { - store.setMaxNearlineRecordSizeBytes(aInt); - return store; - })); - - unusedSchemaDeletionEnabled.ifPresent(aBoolean -> storeMetadataUpdate(clusterName, storeName, store -> { - store.setUnusedSchemaDeletionEnabled(aBoolean); - return store; - })); - - storageNodeReadQuotaEnabled - .ifPresent(aBoolean -> setStorageNodeReadQuotaEnabled(clusterName, storeName, aBoolean)); - - blobTransferEnabled.ifPresent(aBoolean -> storeMetadataUpdate(clusterName, storeName, store -> { - store.setBlobTransferEnabled(aBoolean); - return store; - })); - - nearlineProducerCompressionEnabled.ifPresent(aBoolean -> storeMetadataUpdate(clusterName, storeName, store -> { - store.setNearlineProducerCompressionEnabled(aBoolean); - return store; - })); - - nearlineProducerCountPerWriter.ifPresent(aInt -> storeMetadataUpdate(clusterName, storeName, store -> { - store.setNearlineProducerCountPerWriter(aInt); - return store; - })); - LOGGER.info("Finished updating store: {} in cluster: {}", storeName, clusterName); } catch (VeniceException e) { LOGGER.error( @@ -5118,8 +4464,7 @@ private void internalUpdateStore(String clusterName, String storeName, UpdateSto // rollback to original store storeMetadataUpdate(clusterName, storeName, store -> originalStore); PubSubTopic rtTopic = pubSubTopicRepository.getTopic(Version.composeRealTimeTopic(storeName)); - if (originalStore.isHybrid() && newHybridStoreConfig.isPresent() - && getTopicManager().containsTopicAndAllPartitionsAreOnline(rtTopic)) { + if (originalStore.isHybrid() && getTopicManager().containsTopicAndAllPartitionsAreOnline(rtTopic)) { // Ensure the topic retention is rolled back too getTopicManager().updateTopicRetention( rtTopic, @@ -5134,35 +4479,6 @@ && getTopicManager().containsTopicAndAllPartitionsAreOnline(rtTopic)) { } } - /** - * Enabling hybrid mode for incremental push store is moved into - * {@link VeniceParentHelixAdmin#updateStore(String, String, UpdateStoreQueryParams)} - * TODO: Remove the method and its usage after the deployment of parent controller updateStore change. 
- */ - private void enableHybridModeOrUpdateSettings(String clusterName, String storeName) { - storeMetadataUpdate(clusterName, storeName, store -> { - HybridStoreConfig hybridStoreConfig = store.getHybridStoreConfig(); - if (hybridStoreConfig == null) { - store.setHybridStoreConfig( - new HybridStoreConfigImpl( - DEFAULT_REWIND_TIME_IN_SECONDS, - DEFAULT_HYBRID_OFFSET_LAG_THRESHOLD, - DEFAULT_HYBRID_TIME_LAG_THRESHOLD, - DataReplicationPolicy.NON_AGGREGATE, - null)); - } else if (hybridStoreConfig.getDataReplicationPolicy() == null) { - store.setHybridStoreConfig( - new HybridStoreConfigImpl( - hybridStoreConfig.getRewindTimeInSeconds(), - hybridStoreConfig.getOffsetLagThresholdToGoOnline(), - hybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds(), - DataReplicationPolicy.NON_AGGREGATE, - hybridStoreConfig.getBufferReplayPolicy())); - } - return store; - }); - } - /** * This method is invoked in parent controllers for store migration. */ @@ -5204,132 +4520,6 @@ public void replicateUpdateStore(String clusterName, String storeName, UpdateSto } } - /** - * Used by both the {@link VeniceHelixAdmin} and the {@link VeniceParentHelixAdmin} - * - * @param oldStore Existing Store that is the source for updates. This object will not be modified by this method. - * @param hybridRewindSeconds Optional is present if the returned object should include a new rewind time - * @param hybridOffsetLagThreshold Optional is present if the returned object should include a new offset lag threshold - * @return null if oldStore has no hybrid configs and optionals are not present, - * otherwise a fully specified {@link HybridStoreConfig} - */ - protected static HybridStoreConfig mergeNewSettingsIntoOldHybridStoreConfig( - Store oldStore, - Optional hybridRewindSeconds, - Optional hybridOffsetLagThreshold, - Optional hybridTimeLagThreshold, - Optional hybridDataReplicationPolicy, - Optional bufferReplayPolicy) { - if (!hybridRewindSeconds.isPresent() && !hybridOffsetLagThreshold.isPresent() && !oldStore.isHybrid()) { - return null; // For the nullable union in the avro record - } - HybridStoreConfig mergedHybridStoreConfig; - if (oldStore.isHybrid()) { // for an existing hybrid store, just replace any specified values - HybridStoreConfig oldHybridConfig = oldStore.getHybridStoreConfig().clone(); - mergedHybridStoreConfig = new HybridStoreConfigImpl( - hybridRewindSeconds.isPresent() ? hybridRewindSeconds.get() : oldHybridConfig.getRewindTimeInSeconds(), - hybridOffsetLagThreshold.isPresent() - ? hybridOffsetLagThreshold.get() - : oldHybridConfig.getOffsetLagThresholdToGoOnline(), - hybridTimeLagThreshold.isPresent() - ? hybridTimeLagThreshold.get() - : oldHybridConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds(), - hybridDataReplicationPolicy.isPresent() - ? hybridDataReplicationPolicy.get() - : oldHybridConfig.getDataReplicationPolicy(), - bufferReplayPolicy.isPresent() ? bufferReplayPolicy.get() : oldHybridConfig.getBufferReplayPolicy()); - } else { - // switching a non-hybrid store to hybrid; must specify: - // 1. rewind time - // 2. either offset lag threshold or time lag threshold, or both - if (!(hybridRewindSeconds.isPresent() - && (hybridOffsetLagThreshold.isPresent() || hybridTimeLagThreshold.isPresent()))) { - throw new VeniceException( - oldStore.getName() + " was not a hybrid store. 
In order to make it a hybrid store both " - + " rewind time in seconds and offset or time lag threshold must be specified"); - } - mergedHybridStoreConfig = new HybridStoreConfigImpl( - hybridRewindSeconds.get(), - // If not specified, offset/time lag threshold will be -1 and will not be used to determine whether - // a partition is ready to serve - hybridOffsetLagThreshold.orElse(DEFAULT_HYBRID_OFFSET_LAG_THRESHOLD), - hybridTimeLagThreshold.orElse(DEFAULT_HYBRID_TIME_LAG_THRESHOLD), - hybridDataReplicationPolicy.orElse(DataReplicationPolicy.NON_AGGREGATE), - bufferReplayPolicy.orElse(BufferReplayPolicy.REWIND_FROM_EOP)); - } - if (mergedHybridStoreConfig.getRewindTimeInSeconds() > 0 - && mergedHybridStoreConfig.getOffsetLagThresholdToGoOnline() < 0 - && mergedHybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds() < 0) { - throw new VeniceException( - "Both offset lag threshold and time lag threshold are negative when setting hybrid" + " configs for store " - + oldStore.getName()); - } - return mergedHybridStoreConfig; - } - - static PartitionerConfig mergeNewSettingsIntoOldPartitionerConfig( - Store oldStore, - Optional partitionerClass, - Optional> partitionerParams, - Optional amplificationFactor) { - PartitionerConfig originalPartitionerConfig; - if (oldStore.getPartitionerConfig() == null) { - originalPartitionerConfig = new PartitionerConfigImpl(); - } else { - originalPartitionerConfig = oldStore.getPartitionerConfig(); - } - return new PartitionerConfigImpl( - partitionerClass.orElse(originalPartitionerConfig.getPartitionerClass()), - partitionerParams.orElse(originalPartitionerConfig.getPartitionerParams()), - amplificationFactor.orElse(originalPartitionerConfig.getAmplificationFactor())); - } - - static Map mergeNewViewConfigsIntoOldConfigs( - Store oldStore, - Map viewParameters) throws VeniceException { - // Merge the existing configs with the incoming configs. The new configs will override existing ones which share the - // same key. - Map oldViewConfigMap = oldStore.getViewConfigs(); - if (oldViewConfigMap == null) { - oldViewConfigMap = new HashMap<>(); - } - Map mergedConfigs = - StoreViewUtils.convertViewConfigMapToStoreViewRecordMap(oldViewConfigMap); - mergedConfigs.putAll(StoreViewUtils.convertStringMapViewToStoreViewConfigRecordMap(viewParameters)); - return mergedConfigs; - } - - static Map addNewViewConfigsIntoOldConfigs( - Store oldStore, - String viewClass, - ViewConfig viewConfig) throws VeniceException { - // Add new view config into the existing config map. The new configs will override existing ones which share the - // same key. - Map oldViewConfigMap = oldStore.getViewConfigs(); - if (oldViewConfigMap == null) { - oldViewConfigMap = new HashMap<>(); - } - Map mergedConfigs = - StoreViewUtils.convertViewConfigMapToStoreViewRecordMap(oldViewConfigMap); - - StoreViewConfigRecord newStoreViewConfigRecord = - StoreViewUtils.convertViewConfigToStoreViewConfigRecord(viewConfig); - mergedConfigs.put(viewClass, newStoreViewConfigRecord); - return mergedConfigs; - } - - static Map removeViewConfigFromStoreViewConfigMap(Store oldStore, String viewClass) - throws VeniceException { - Map oldViewConfigMap = oldStore.getViewConfigs(); - if (oldViewConfigMap == null) { - oldViewConfigMap = new HashMap<>(); - } - Map mergedConfigs = - StoreViewUtils.convertViewConfigMapToStoreViewRecordMap(oldViewConfigMap); - mergedConfigs.remove(viewClass); - return mergedConfigs; - } - /** * Update the store metadata by applying provided operation. 
* @param clusterName name of the cluster. @@ -5623,8 +4813,9 @@ public SchemaEntry addValueSchema( DirectionalSchemaCompatibilityType expectedCompatibilityType) { checkControllerLeadershipFor(clusterName); ReadWriteSchemaRepository schemaRepository = getHelixVeniceClusterResources(clusterName).getSchemaRepository(); - schemaRepository.addValueSchema(storeName, valueSchemaStr, expectedCompatibilityType); - return new SchemaEntry(schemaRepository.getValueSchemaId(storeName, valueSchemaStr), valueSchemaStr); + int newValueSchemaId = + schemaRepository.preCheckValueSchemaAndGetNextAvailableId(storeName, valueSchemaStr, expectedCompatibilityType); + return addValueSchema(clusterName, storeName, valueSchemaStr, newValueSchemaId, expectedCompatibilityType); } /** @@ -5641,8 +4832,14 @@ public SchemaEntry addValueSchema( DirectionalSchemaCompatibilityType compatibilityType) { checkControllerLeadershipFor(clusterName); ReadWriteSchemaRepository schemaRepository = getHelixVeniceClusterResources(clusterName).getSchemaRepository(); + + if (schemaId == SchemaData.DUPLICATE_VALUE_SCHEMA_CODE) { + return new SchemaEntry(schemaRepository.getValueSchemaId(storeName, valueSchemaStr), valueSchemaStr); + } + int newValueSchemaId = schemaRepository.preCheckValueSchemaAndGetNextAvailableId(storeName, valueSchemaStr, compatibilityType); + if (newValueSchemaId != SchemaData.DUPLICATE_VALUE_SCHEMA_CODE && newValueSchemaId != schemaId) { throw new VeniceException( "Inconsistent value schema id between the caller and the local schema repository." @@ -5650,7 +4847,15 @@ public SchemaEntry addValueSchema( + newValueSchemaId + " for store " + storeName + " in cluster " + clusterName + " Schema: " + valueSchemaStr); } - return schemaRepository.addValueSchema(storeName, valueSchemaStr, newValueSchemaId); + SchemaEntry addedSchemaEntry = schemaRepository.addValueSchema(storeName, valueSchemaStr, schemaId); + + if (isPrimary() && !isParent() && newValueSchemaId != SchemaData.DUPLICATE_VALUE_SCHEMA_CODE) { + // Now register all inferred schemas for the store if this is a child controller in single-region mode. + // Parent in multi-region mode will register all inferred schemas via the admin channel. + PrimaryControllerConfigUpdateUtils.registerInferredSchemas(this, clusterName, storeName); + } + + return addedSchemaEntry; } /** @@ -5704,14 +4909,8 @@ public DerivedSchemaEntry removeDerivedSchema( .removeDerivedSchema(storeName, valueSchemaId, derivedSchemaId); } - /** - * Add a new superset schema for the given store with all specified properties. - *

- * Generate the superset schema off the current schema and latest superset schema (if any, if not pick the latest value schema) existing in the store. - * If the newly generated superset schema is unique add it to the store and update latestSuperSetValueSchemaId of the store. - */ @Override - public SchemaEntry addSupersetSchema( + public void addSupersetSchema( String clusterName, String storeName, String valueSchema, @@ -5721,12 +4920,16 @@ public SchemaEntry addSupersetSchema( checkControllerLeadershipFor(clusterName); ReadWriteSchemaRepository schemaRepository = getHelixVeniceClusterResources(clusterName).getSchemaRepository(); + if (valueSchemaId != SchemaData.INVALID_VALUE_SCHEMA_ID) { + // add the value schema + schemaRepository.addValueSchema(storeName, valueSchema, valueSchemaId); + } + final SchemaEntry existingSupersetSchemaEntry = schemaRepository.getValueSchema(storeName, supersetSchemaId); if (existingSupersetSchemaEntry == null) { // If the new superset schema does not exist in the schema repo, add it LOGGER.info("Adding superset schema: {} for store: {}", supersetSchemaStr, storeName); schemaRepository.addValueSchema(storeName, supersetSchemaStr, supersetSchemaId); - } else { final Schema newSupersetSchema = AvroSchemaParseUtils.parseSchemaFromJSONStrictValidation(supersetSchemaStr); if (!AvroSchemaUtils.compareSchemaIgnoreFieldOrder(existingSupersetSchemaEntry.getSchema(), newSupersetSchema)) { @@ -5736,25 +4939,10 @@ public SchemaEntry addSupersetSchema( } } - // add the value schema - return schemaRepository.addValueSchema(storeName, valueSchema, valueSchemaId); - } - - int getValueSchemaIdIgnoreFieldOrder( - String clusterName, - String storeName, - String valueSchemaStr, - Comparator schemaComparator) { - checkControllerLeadershipFor(clusterName); - SchemaEntry valueSchemaEntry = new SchemaEntry(SchemaData.UNKNOWN_SCHEMA_ID, valueSchemaStr); - - for (SchemaEntry schemaEntry: getValueSchemas(clusterName, storeName)) { - if (schemaComparator.compare(schemaEntry.getSchema(), valueSchemaEntry.getSchema()) == 0) { - return schemaEntry.getId(); - } - } - return SchemaData.INVALID_VALUE_SCHEMA_ID; - + storeMetadataUpdate(clusterName, storeName, store -> { + store.setLatestSuperSetValueSchemaId(supersetSchemaId); + return store; + }); } int checkPreConditionForAddValueSchemaAndGetNewSchemaId( @@ -5790,28 +4978,7 @@ public Collection getReplicationMetadataSchemas(String clusterNa return schemaRepo.getReplicationMetadataSchemas(storeName); } - boolean checkIfValueSchemaAlreadyHasRmdSchema( - String clusterName, - String storeName, - final int valueSchemaID, - final int replicationMetadataVersionId) { - checkControllerLeadershipFor(clusterName); - Collection schemaEntries = - getHelixVeniceClusterResources(clusterName).getSchemaRepository().getReplicationMetadataSchemas(storeName); - for (RmdSchemaEntry rmdSchemaEntry: schemaEntries) { - if (rmdSchemaEntry.getValueSchemaID() == valueSchemaID - && rmdSchemaEntry.getId() == replicationMetadataVersionId) { - return true; - } - } - return false; - } - - boolean checkIfMetadataSchemaAlreadyPresent( - String clusterName, - String storeName, - int valueSchemaId, - RmdSchemaEntry rmdSchemaEntry) { + boolean checkIfMetadataSchemaAlreadyPresent(String clusterName, String storeName, RmdSchemaEntry rmdSchemaEntry) { checkControllerLeadershipFor(clusterName); try { Collection schemaEntries = @@ -5843,7 +5010,7 @@ public RmdSchemaEntry addReplicationMetadataSchema( RmdSchemaEntry rmdSchemaEntry = new RmdSchemaEntry(valueSchemaId, 
replicationMetadataVersionId, replicationMetadataSchemaStr); - if (checkIfMetadataSchemaAlreadyPresent(clusterName, storeName, valueSchemaId, rmdSchemaEntry)) { + if (checkIfMetadataSchemaAlreadyPresent(clusterName, storeName, rmdSchemaEntry)) { LOGGER.info( "Timestamp metadata schema Already present: for store: {} in cluster: {} metadataSchema: {} " + "replicationMetadataVersionId: {} valueSchemaId: {}", @@ -5970,13 +5137,6 @@ public Map getStorageNodesStatus(String clusterName, boolean ena return instancesStatusesMap; } - Schema getSupersetOrLatestValueSchema(String clusterName, Store store) { - ReadWriteSchemaRepository schemaRepository = getHelixVeniceClusterResources(clusterName).getSchemaRepository(); - // If already a superset schema exists, try to generate the new superset from that and the input value schema - SchemaEntry existingSchema = schemaRepository.getSupersetOrLatestValueSchema(store.getName()); - return existingSchema == null ? null : existingSchema.getSchema(); - } - /** * Remove one storage node from the given cluster. *

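Reviewer note (not part of the patch): the two addValueSchema overloads above now share the same id pre-check. The snippet below is a simplified sketch of that flow under assumed imports (ReadWriteSchemaRepository, SchemaEntry, SchemaData, DirectionalSchemaCompatibilityType, VeniceException); method names follow the diff, but this is not the literal controller code.

// Sketch of the hardened value-schema registration path (illustrative only).
SchemaEntry addValueSchemaSketch(ReadWriteSchemaRepository repo, String storeName, String schemaStr,
    int requestedId, DirectionalSchemaCompatibilityType compat) {
  if (requestedId == SchemaData.DUPLICATE_VALUE_SCHEMA_CODE) {
    // Caller already knows this schema is registered: return the existing entry without re-adding it.
    return new SchemaEntry(repo.getValueSchemaId(storeName, schemaStr), schemaStr);
  }
  int nextId = repo.preCheckValueSchemaAndGetNextAvailableId(storeName, schemaStr, compat);
  if (nextId != SchemaData.DUPLICATE_VALUE_SCHEMA_CODE && nextId != requestedId) {
    // Fail fast when the caller-provided id disagrees with the local schema repository.
    throw new VeniceException("Inconsistent value schema id between the caller and the local schema repository.");
  }
  // Register under the caller-provided id so parent and child regions stay aligned.
  // In the actual patch, a primary controller in single-region mode additionally registers
  // inferred schemas afterwards via PrimaryControllerConfigUpdateUtils.registerInferredSchemas.
  return repo.addValueSchema(storeName, schemaStr, requestedId);
}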
@@ -7807,38 +6967,19 @@ Store checkPreConditionForAclOp(String clusterName, String storeName) { } /** - * A store is not hybrid in the following two scenarios: - * If hybridStoreConfig is null, it means store is not hybrid. - * If all the hybrid config values are negative, it indicates that the store is being set back to batch-only store. - */ - boolean isHybrid(HybridStoreConfig hybridStoreConfig) { - return hybridStoreConfig != null - && (hybridStoreConfig.getRewindTimeInSeconds() >= 0 || hybridStoreConfig.getOffsetLagThresholdToGoOnline() >= 0 - || hybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds() >= 0); - } - - /** - * @see VeniceHelixAdmin#isHybrid(HybridStoreConfig) + * @see Admin#isParent() */ - boolean isHybrid(HybridStoreConfigRecord hybridStoreConfigRecord) { - HybridStoreConfig hybridStoreConfig = null; - if (hybridStoreConfigRecord != null) { - hybridStoreConfig = new HybridStoreConfigImpl( - hybridStoreConfigRecord.rewindTimeInSeconds, - hybridStoreConfigRecord.offsetLagThresholdToGoOnline, - hybridStoreConfigRecord.producerTimestampLagThresholdToGoOnlineInSeconds, - DataReplicationPolicy.valueOf(hybridStoreConfigRecord.dataReplicationPolicy), - BufferReplayPolicy.valueOf(hybridStoreConfigRecord.bufferReplayPolicy)); - } - return isHybrid(hybridStoreConfig); + @Override + public boolean isParent() { + return multiClusterConfigs.isParent(); } /** - * @see Admin#isParent() + * @see Admin#isPrimary() */ @Override - public boolean isParent() { - return multiClusterConfigs.isParent(); + public boolean isPrimary() { + return !multiClusterConfigs.isMultiRegion() || isParent(); } /** @@ -8236,9 +7377,13 @@ public void createStoragePersona( Set storesToEnforce, Set owners) { checkControllerLeadershipFor(clusterName); - HelixVeniceClusterResources resources = getHelixVeniceClusterResources(clusterName); + StoragePersonaRepository repository = getHelixVeniceClusterResources(clusterName).getStoragePersonaRepository(); + if (repository.hasPersona(name)) { + throw new VeniceException("Persona with name " + name + " already exists"); + } + repository.validatePersona(name, quotaNumber, storesToEnforce, owners); + try { - StoragePersonaRepository repository = resources.getStoragePersonaRepository(); repository.addPersona(name, quotaNumber, storesToEnforce, owners); } catch (Exception e) { LOGGER.error("Failed to execute CreateStoragePersonaOperation.", e); @@ -8278,9 +7423,10 @@ public void deleteStoragePersona(String clusterName, String name) { @Override public void updateStoragePersona(String clusterName, String name, UpdateStoragePersonaQueryParams queryParams) { checkControllerLeadershipFor(clusterName); - HelixVeniceClusterResources resources = getHelixVeniceClusterResources(clusterName); + StoragePersonaRepository repository = getHelixVeniceClusterResources(clusterName).getStoragePersonaRepository(); + repository.validatePersonaUpdate(name, queryParams); + try { - StoragePersonaRepository repository = resources.getStoragePersonaRepository(); repository.updatePersona(name, queryParams); } catch (Exception e) { LOGGER.error("Failed to execute UpdateStoragePersonaOperation.", e); @@ -8411,8 +7557,8 @@ public boolean isClusterWipeAllowed(String clusterName) { return multiClusterConfigs.getControllerConfig(clusterName).isClusterWipeAllowed(); } - // Visible for testing - VeniceControllerMultiClusterConfig getMultiClusterConfigs() { + @Override + public VeniceControllerMultiClusterConfig getMultiClusterConfigs() { return multiClusterConfigs; } diff --git 
a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceParentHelixAdmin.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceParentHelixAdmin.java index da12fd4c97..3cd3898f53 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceParentHelixAdmin.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceParentHelixAdmin.java @@ -2,7 +2,6 @@ import static com.linkedin.venice.controller.VeniceHelixAdmin.VERSION_ID_UNSET; import static com.linkedin.venice.controller.kafka.consumer.AdminConsumptionTask.IGNORED_CURRENT_VERSION; -import static com.linkedin.venice.controller.util.ParentControllerConfigUpdateUtils.addUpdateSchemaForStore; import static com.linkedin.venice.controllerapi.ControllerApiConstants.ACCESS_CONTROLLED; import static com.linkedin.venice.controllerapi.ControllerApiConstants.ACTIVE_ACTIVE_REPLICATION_ENABLED; import static com.linkedin.venice.controllerapi.ControllerApiConstants.AMPLIFICATION_FACTOR; @@ -60,9 +59,6 @@ import static com.linkedin.venice.controllerapi.ControllerApiConstants.UNUSED_SCHEMA_DELETION_ENABLED; import static com.linkedin.venice.controllerapi.ControllerApiConstants.VERSION; import static com.linkedin.venice.controllerapi.ControllerApiConstants.WRITE_COMPUTATION_ENABLED; -import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_HYBRID_OFFSET_LAG_THRESHOLD; -import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_HYBRID_TIME_LAG_THRESHOLD; -import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_REWIND_TIME_IN_SECONDS; import static com.linkedin.venice.meta.Version.VERSION_SEPARATOR; import static com.linkedin.venice.meta.VersionStatus.ONLINE; import static com.linkedin.venice.meta.VersionStatus.PUSHED; @@ -75,7 +71,6 @@ import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; -import com.linkedin.venice.ConfigConstants; import com.linkedin.venice.SSLConfig; import com.linkedin.venice.acl.AclException; import com.linkedin.venice.acl.DynamicAccessController; @@ -89,7 +84,6 @@ import com.linkedin.venice.authorization.Resource; import com.linkedin.venice.common.VeniceSystemStoreType; import com.linkedin.venice.common.VeniceSystemStoreUtils; -import com.linkedin.venice.compression.CompressionStrategy; import com.linkedin.venice.controller.authorization.SystemStoreAclSynchronizationTask; import com.linkedin.venice.controller.init.DelegatingClusterLeaderInitializationRoutine; import com.linkedin.venice.controller.init.SharedInternalRTStoreInitializationRoutine; @@ -124,7 +118,6 @@ import com.linkedin.venice.controller.kafka.protocol.admin.SetStoreOwner; import com.linkedin.venice.controller.kafka.protocol.admin.SetStorePartitionCount; import com.linkedin.venice.controller.kafka.protocol.admin.StoreCreation; -import com.linkedin.venice.controller.kafka.protocol.admin.StoreViewConfigRecord; import com.linkedin.venice.controller.kafka.protocol.admin.SupersetSchemaCreation; import com.linkedin.venice.controller.kafka.protocol.admin.UpdateStoragePersona; import com.linkedin.venice.controller.kafka.protocol.admin.UpdateStore; @@ -137,7 +130,10 @@ import com.linkedin.venice.controller.migration.MigrationPushStrategyZKAccessor; import com.linkedin.venice.controller.supersetschema.DefaultSupersetSchemaGenerator; import com.linkedin.venice.controller.supersetschema.SupersetSchemaGenerator; -import 
com.linkedin.venice.controller.util.ParentControllerConfigUpdateUtils; +import com.linkedin.venice.controller.util.AdminUtils; +import com.linkedin.venice.controller.util.PrimaryControllerConfigUpdateUtils; +import com.linkedin.venice.controller.util.UpdateStoreUtils; +import com.linkedin.venice.controller.util.UpdateStoreWrapper; import com.linkedin.venice.controllerapi.AdminCommandExecution; import com.linkedin.venice.controllerapi.ControllerClient; import com.linkedin.venice.controllerapi.ControllerResponse; @@ -160,7 +156,6 @@ import com.linkedin.venice.exceptions.ConcurrentBatchPushException; import com.linkedin.venice.exceptions.ConfigurationException; import com.linkedin.venice.exceptions.ErrorType; -import com.linkedin.venice.exceptions.PartitionerSchemaMismatchException; import com.linkedin.venice.exceptions.ResourceStillExistsException; import com.linkedin.venice.exceptions.VeniceException; import com.linkedin.venice.exceptions.VeniceHttpException; @@ -173,13 +168,13 @@ import com.linkedin.venice.helix.Replica; import com.linkedin.venice.helix.StoragePersonaRepository; import com.linkedin.venice.helix.ZkStoreConfigAccessor; -import com.linkedin.venice.meta.BackupStrategy; import com.linkedin.venice.meta.BufferReplayPolicy; import com.linkedin.venice.meta.DataReplicationPolicy; import com.linkedin.venice.meta.ETLStoreConfig; import com.linkedin.venice.meta.HybridStoreConfig; import com.linkedin.venice.meta.Instance; import com.linkedin.venice.meta.PartitionerConfig; +import com.linkedin.venice.meta.ReadWriteSchemaRepository; import com.linkedin.venice.meta.ReadWriteStoreRepository; import com.linkedin.venice.meta.RegionPushDetails; import com.linkedin.venice.meta.RoutersClusterConfig; @@ -191,9 +186,6 @@ import com.linkedin.venice.meta.VeniceUserStoreType; import com.linkedin.venice.meta.Version; import com.linkedin.venice.meta.VersionStatus; -import com.linkedin.venice.meta.ViewConfig; -import com.linkedin.venice.meta.ViewConfigImpl; -import com.linkedin.venice.meta.ViewParameterKeys; import com.linkedin.venice.persona.StoragePersona; import com.linkedin.venice.pubsub.PubSubConsumerAdapterFactory; import com.linkedin.venice.pubsub.PubSubTopicRepository; @@ -209,7 +201,6 @@ import com.linkedin.venice.schema.SchemaEntry; import com.linkedin.venice.schema.avro.DirectionalSchemaCompatibilityType; import com.linkedin.venice.schema.rmd.RmdSchemaEntry; -import com.linkedin.venice.schema.rmd.RmdSchemaGenerator; import com.linkedin.venice.schema.writecompute.DerivedSchemaEntry; import com.linkedin.venice.schema.writecompute.WriteComputeSchemaConverter; import com.linkedin.venice.security.SSLFactory; @@ -225,7 +216,6 @@ import com.linkedin.venice.utils.CollectionUtils; import com.linkedin.venice.utils.ObjectMapperFactory; import com.linkedin.venice.utils.Pair; -import com.linkedin.venice.utils.PartitionUtils; import com.linkedin.venice.utils.ReflectUtils; import com.linkedin.venice.utils.RegionUtils; import com.linkedin.venice.utils.SslUtils; @@ -237,7 +227,6 @@ import com.linkedin.venice.utils.locks.AutoCloseableLock; import com.linkedin.venice.views.MaterializedView; import com.linkedin.venice.views.VeniceView; -import com.linkedin.venice.views.ViewUtils; import com.linkedin.venice.writer.VeniceWriter; import com.linkedin.venice.writer.VeniceWriterFactory; import com.linkedin.venice.writer.VeniceWriterOptions; @@ -266,13 +255,12 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; -import 
java.util.function.Function; import java.util.stream.Collectors; import javax.annotation.Nonnull; import org.apache.avro.Schema; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.Validate; -import org.apache.http.HttpStatus; +import org.apache.hc.core5.http.HttpStatus; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -1008,7 +996,7 @@ public void addVersionAndStartIngestion( boolean versionSwapDeferred, int repushSourceVersion) { // Parent controller will always pick the replicationMetadataVersionId from configs. - final int replicationMetadataVersionId = getRmdVersionID(storeName, clusterName); + final int replicationMetadataVersionId = AdminUtils.getRmdVersionID(this, storeName, clusterName); Version version = getVeniceHelixAdmin().addVersionOnly( clusterName, storeName, @@ -1019,9 +1007,6 @@ public void addVersionAndStartIngestion( remoteKafkaBootstrapServers, rewindTimeInSecondsOverride, replicationMetadataVersionId); - if (version.isActiveActiveReplicationEnabled()) { - updateReplicationMetadataSchemaForAllValueSchema(clusterName, storeName); - } acquireAdminMessageLock(clusterName, storeName); try { sendAddVersionAdminMessage(clusterName, storeName, pushJobId, version, numberOfPartitions, pushType, null, -1); @@ -1030,34 +1015,6 @@ public void addVersionAndStartIngestion( } } - private int getRmdVersionID(final String storeName, final String clusterName) { - final Store store = getVeniceHelixAdmin().getStore(clusterName, storeName); - if (store == null) { - LOGGER.warn( - "No store found in the store repository. Will get store-level RMD version ID from cluster config. " - + "Store name: {}, cluster: {}", - storeName, - clusterName); - } else if (store.getRmdVersion() == ConfigConstants.UNSPECIFIED_REPLICATION_METADATA_VERSION) { - LOGGER.info("No store-level RMD version ID found for store {} in cluster {}", storeName, clusterName); - } else { - LOGGER.info( - "Found store-level RMD version ID {} for store {} in cluster {}", - store.getRmdVersion(), - storeName, - clusterName); - return store.getRmdVersion(); - } - - final VeniceControllerClusterConfig controllerConfig = getMultiClusterConfigs().getControllerConfig(clusterName); - if (controllerConfig == null) { - throw new VeniceException("No controller cluster config found for cluster " + clusterName); - } - final int rmdVersionID = controllerConfig.getReplicationMetadataVersion(); - LOGGER.info("Use RMD version ID {} for cluster {}", rmdVersionID, clusterName); - return rmdVersionID; - } - /** * Since there is no offline push running in Parent Controller, * the old store versions won't be cleaned up by job completion action, so Parent Controller chooses @@ -1600,7 +1557,7 @@ Version addVersionAndTopicOnly( boolean versionSwapDeferred, String targetedRegions, int repushSourceVersion) { - final int replicationMetadataVersionId = getRmdVersionID(storeName, clusterName); + final int replicationMetadataVersionId = AdminUtils.getRmdVersionID(this, storeName, clusterName); Pair result = getVeniceHelixAdmin().addVersionAndTopicOnly( clusterName, storeName, @@ -1622,9 +1579,6 @@ Version addVersionAndTopicOnly( repushSourceVersion); Version newVersion = result.getSecond(); if (result.getFirst()) { - if (newVersion.isActiveActiveReplicationEnabled()) { - updateReplicationMetadataSchemaForAllValueSchema(clusterName, storeName); - } // Send admin message if the version is newly created. 
acquireAdminMessageLock(clusterName, storeName); try { @@ -2064,16 +2018,6 @@ public void rollbackToBackupVersion(String clusterName, String storeName, String } } - /** - * Unsupported operation in the parent controller. - */ - @Override - public void setStoreLargestUsedVersion(String clusterName, String storeName, int versionNumber) { - throw new VeniceUnsupportedOperationException( - "setStoreLargestUsedVersion", - "This is only supported in the Child Controller."); - } - /** * Update the owner of a specified store by sending {@link AdminMessageType#SET_STORE_OWNER SET_STORE_OWNER} admin message * to the admin topic. @@ -2217,309 +2161,208 @@ public void setStoreReadWriteability(String clusterName, String storeName, boole public void updateStore(String clusterName, String storeName, UpdateStoreQueryParams params) { acquireAdminMessageLock(clusterName, storeName); try { - Optional owner = params.getOwner(); - Optional readability = params.getEnableReads(); - Optional writeability = params.getEnableWrites(); - Optional partitionCount = params.getPartitionCount(); - Optional partitionerClass = params.getPartitionerClass(); - Optional> partitionerParams = params.getPartitionerParams(); - Optional amplificationFactor = params.getAmplificationFactor(); - Optional storageQuotaInByte = params.getStorageQuotaInByte(); - Optional readQuotaInCU = params.getReadQuotaInCU(); - Optional currentVersion = params.getCurrentVersion(); - Optional largestUsedVersionNumber = params.getLargestUsedVersionNumber(); - Optional hybridRewindSeconds = params.getHybridRewindSeconds(); - Optional hybridOffsetLagThreshold = params.getHybridOffsetLagThreshold(); - Optional hybridTimeLagThreshold = params.getHybridTimeLagThreshold(); - Optional hybridDataReplicationPolicy = params.getHybridDataReplicationPolicy(); - Optional hybridBufferReplayPolicy = params.getHybridBufferReplayPolicy(); - Optional accessControlled = params.getAccessControlled(); - Optional compressionStrategy = params.getCompressionStrategy(); - Optional clientDecompressionEnabled = params.getClientDecompressionEnabled(); - Optional chunkingEnabled = params.getChunkingEnabled(); - Optional rmdChunkingEnabled = params.getRmdChunkingEnabled(); - Optional batchGetLimit = params.getBatchGetLimit(); - Optional numVersionsToPreserve = params.getNumVersionsToPreserve(); - Optional incrementalPushEnabled = params.getIncrementalPushEnabled(); - Optional separateRealTimeTopicEnabled = params.getSeparateRealTimeTopicEnabled(); - Optional storeMigration = params.getStoreMigration(); - Optional writeComputationEnabled = params.getWriteComputationEnabled(); - Optional replicationMetadataVersionID = params.getReplicationMetadataVersionID(); - Optional readComputationEnabled = params.getReadComputationEnabled(); - Optional bootstrapToOnlineTimeoutInHours = params.getBootstrapToOnlineTimeoutInHours(); - Optional backupStrategy = params.getBackupStrategy(); - Optional autoSchemaRegisterPushJobEnabled = params.getAutoSchemaRegisterPushJobEnabled(); - Optional hybridStoreDiskQuotaEnabled = params.getHybridStoreDiskQuotaEnabled(); - Optional regularVersionETLEnabled = params.getRegularVersionETLEnabled(); - Optional futureVersionETLEnabled = params.getFutureVersionETLEnabled(); - Optional etledUserProxyAccount = params.getETLedProxyUserAccount(); - Optional nativeReplicationEnabled = params.getNativeReplicationEnabled(); - Optional pushStreamSourceAddress = params.getPushStreamSourceAddress(); - Optional backupVersionRetentionMs = params.getBackupVersionRetentionMs(); - 
Optional replicationFactor = params.getReplicationFactor(); - Optional migrationDuplicateStore = params.getMigrationDuplicateStore(); - Optional nativeReplicationSourceFabric = params.getNativeReplicationSourceFabric(); - Optional activeActiveReplicationEnabled = params.getActiveActiveReplicationEnabled(); - Optional regionsFilter = params.getRegionsFilter(); - Optional personaName = params.getStoragePersona(); - Optional> storeViewConfig = params.getStoreViews(); - Optional viewName = params.getViewName(); - Optional viewClassName = params.getViewClassName(); - Optional> viewParams = params.getViewClassParams(); - Optional removeView = params.getDisableStoreView(); - Optional latestSupersetSchemaId = params.getLatestSupersetSchemaId(); - Optional unusedSchemaDeletionEnabled = params.getUnusedSchemaDeletionEnabled(); - /** * Check whether parent controllers will only propagate the update configs to child controller, or all unchanged * configs should be replicated to children too. */ Optional replicateAll = params.getReplicateAllConfigs(); - Optional storageNodeReadQuotaEnabled = params.getStorageNodeReadQuotaEnabled(); - Optional minCompactionLagSeconds = params.getMinCompactionLagSeconds(); - Optional maxCompactionLagSeconds = params.getMaxCompactionLagSeconds(); - Optional maxRecordSizeBytes = params.getMaxRecordSizeBytes(); - Optional maxNearlineRecordSizeBytes = params.getMaxNearlineRecordSizeBytes(); boolean replicateAllConfigs = replicateAll.isPresent() && replicateAll.get(); List updatedConfigsList = new LinkedList<>(); - String errorMessagePrefix = "Store update error for " + storeName + " in cluster: " + clusterName + ": "; - Store currStore = getVeniceHelixAdmin().getStore(clusterName, storeName); - if (currStore == null) { - LOGGER.error(errorMessagePrefix + "store does not exist, and thus cannot be updated."); - throw new VeniceNoStoreException(storeName, clusterName); + UpdateStoreWrapper updateStoreWrapper = + UpdateStoreUtils.getStoreUpdate(this, clusterName, storeName, params, false); + if (updateStoreWrapper == null) { + return; } - UpdateStore setStore = (UpdateStore) AdminMessageType.UPDATE_STORE.getNewInstance(); - setStore.clusterName = clusterName; - setStore.storeName = storeName; - setStore.owner = owner.map(addToUpdatedConfigList(updatedConfigsList, OWNER)).orElseGet(currStore::getOwner); - if (!currStore.isHybrid() && (hybridRewindSeconds.isPresent() || hybridOffsetLagThreshold.isPresent())) { - // Today target colo pushjob cannot handle hybrid stores, so if a batch push is running, fail the request + Store originalStore = updateStoreWrapper.originalStore; + Set updatedConfigs = updateStoreWrapper.updatedConfigs; + Store updatedStore = updateStoreWrapper.updatedStore; + + if (!replicateAllConfigs && updatedConfigs.isEmpty()) { + String errMsg = "UpdateStore command failed for store " + storeName + ". 
The command didn't change any specific" + + " store config and didn't specify \"--replicate-all-configs\" flag."; + LOGGER.error(errMsg); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errMsg, ErrorType.BAD_REQUEST); + } + + if (!originalStore.isHybrid() && updatedStore.isHybrid()) { + // Today target colo push job cannot handle hybrid stores, so if a batch push is running, fail the request Optional currentPushTopic = getTopicForCurrentPushJob(clusterName, storeName, false, false); if (currentPushTopic.isPresent()) { String errorMessage = - "Cannot convert to hybrid as there is already a pushjob running with topic " + currentPushTopic.get(); + "Cannot convert to hybrid as there is already a push job running with topic " + currentPushTopic.get(); LOGGER.error(errorMessage); throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.BAD_REQUEST); } } - // Invalid config update on hybrid will not be populated to admin channel so subsequent updates on the store won't - // be blocked by retry mechanism. - if (currStore.isHybrid() && (partitionerClass.isPresent() || partitionerParams.isPresent())) { - String errorMessage = errorMessagePrefix + "Cannot change partitioner class and parameters for hybrid stores"; - LOGGER.error(errorMessage); - throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.BAD_REQUEST); + + UpdateStore setStore = (UpdateStore) AdminMessageType.UPDATE_STORE.getNewInstance(); + setStore.clusterName = clusterName; + setStore.storeName = storeName; + + if (updatedConfigs.contains(OWNER)) { + setStore.owner = updatedStore.getOwner(); + updatedConfigsList.add(OWNER); + } else { + setStore.owner = originalStore.getOwner(); } - if (partitionCount.isPresent()) { - getVeniceHelixAdmin().preCheckStorePartitionCountUpdate(clusterName, currStore, partitionCount.get()); - setStore.partitionNum = partitionCount.get(); + if (updatedConfigs.contains(PARTITION_COUNT)) { + setStore.partitionNum = updatedStore.getPartitionCount(); updatedConfigsList.add(PARTITION_COUNT); } else { - setStore.partitionNum = currStore.getPartitionCount(); + setStore.partitionNum = originalStore.getPartitionCount(); } - /** - * TODO: We should build an UpdateStoreHelper that takes current store config and update command as input, and - * return whether the update command is valid. - */ - validateActiveActiveReplicationEnableConfigs(activeActiveReplicationEnabled, nativeReplicationEnabled, currStore); - - setStore.nativeReplicationEnabled = - nativeReplicationEnabled.map(addToUpdatedConfigList(updatedConfigsList, NATIVE_REPLICATION_ENABLED)) - .orElseGet(currStore::isNativeReplicationEnabled); - setStore.pushStreamSourceAddress = - pushStreamSourceAddress.map(addToUpdatedConfigList(updatedConfigsList, PUSH_STREAM_SOURCE_ADDRESS)) - .orElseGet(currStore::getPushStreamSourceAddress); - - if (storeViewConfig.isPresent() && viewName.isPresent()) { - throw new VeniceException("Cannot update a store view and overwrite store view setup together!"); - } - if (viewName.isPresent()) { - Map updatedViewSettings; - if (!removeView.isPresent()) { - if (!viewClassName.isPresent()) { - throw new VeniceException("View class name is required when configuring a view."); - } - // If View parameter is not provided, use emtpy map instead. It does not inherit from existing config. 
- ViewConfig viewConfig = new ViewConfigImpl(viewClassName.get(), viewParams.orElse(Collections.emptyMap())); - ViewConfig validatedViewConfig = validateAndDecorateStoreViewConfig(currStore, viewConfig, viewName.get()); - updatedViewSettings = - VeniceHelixAdmin.addNewViewConfigsIntoOldConfigs(currStore, viewName.get(), validatedViewConfig); - } else { - updatedViewSettings = VeniceHelixAdmin.removeViewConfigFromStoreViewConfigMap(currStore, viewName.get()); - } - setStore.views = updatedViewSettings; - updatedConfigsList.add(STORE_VIEW); + if (updatedConfigs.contains(NATIVE_REPLICATION_ENABLED)) { + setStore.nativeReplicationEnabled = updatedStore.isNativeReplicationEnabled(); + updatedConfigsList.add(NATIVE_REPLICATION_ENABLED); + } else { + setStore.nativeReplicationEnabled = originalStore.isNativeReplicationEnabled(); } - if (storeViewConfig.isPresent()) { - // Validate and overwrite store views if they're getting set - Map validatedViewConfigs = - validateAndDecorateStoreViewConfigs(storeViewConfig.get(), currStore); - setStore.views = StoreViewUtils.convertViewConfigMapToStoreViewRecordMap(validatedViewConfigs); + if (updatedConfigs.contains(PUSH_STREAM_SOURCE_ADDRESS)) { + setStore.pushStreamSourceAddress = updatedStore.getPushStreamSourceAddress(); + updatedConfigsList.add(PUSH_STREAM_SOURCE_ADDRESS); + } else { + setStore.pushStreamSourceAddress = originalStore.getPushStreamSourceAddress(); + } + + if (updatedConfigs.contains(STORE_VIEW)) { + setStore.views = StoreViewUtils.convertViewConfigMapToStoreViewRecordMap(updatedStore.getViewConfigs()); updatedConfigsList.add(STORE_VIEW); } - // Only update fields that are set, other fields will be read from the original store's partitioner config. - PartitionerConfig updatedPartitionerConfig = VeniceHelixAdmin.mergeNewSettingsIntoOldPartitionerConfig( - currStore, - partitionerClass, - partitionerParams, - amplificationFactor); - if (partitionerClass.isPresent() || partitionerParams.isPresent() || amplificationFactor.isPresent()) { - // Update updatedConfigsList. - partitionerClass.ifPresent(p -> updatedConfigsList.add(PARTITIONER_CLASS)); - partitionerParams.ifPresent(p -> updatedConfigsList.add(PARTITIONER_PARAMS)); - amplificationFactor.ifPresent(p -> updatedConfigsList.add(AMPLIFICATION_FACTOR)); + boolean partitionerChange = false; + + if (updatedConfigs.contains(PARTITIONER_CLASS)) { + updatedConfigsList.add(PARTITIONER_CLASS); + partitionerChange = true; + } + + if (updatedConfigs.contains(PARTITIONER_PARAMS)) { + updatedConfigsList.add(PARTITIONER_PARAMS); + partitionerChange = true; + } + + if (updatedConfigs.contains(AMPLIFICATION_FACTOR)) { + updatedConfigsList.add(AMPLIFICATION_FACTOR); + partitionerChange = true; + } + + if (partitionerChange) { // Create PartitionConfigRecord for admin channel transmission. 
+ PartitionerConfig updatedPartitionerConfig = updatedStore.getPartitionerConfig(); PartitionerConfigRecord partitionerConfigRecord = new PartitionerConfigRecord(); partitionerConfigRecord.partitionerClass = updatedPartitionerConfig.getPartitionerClass(); partitionerConfigRecord.partitionerParams = CollectionUtils.getCharSequenceMapFromStringMap(updatedPartitionerConfig.getPartitionerParams()); partitionerConfigRecord.amplificationFactor = updatedPartitionerConfig.getAmplificationFactor(); - // Before setting partitioner config, verify the updated partitionerConfig can be built - try { - PartitionUtils.getVenicePartitioner( - partitionerConfigRecord.partitionerClass.toString(), - new VeniceProperties(partitionerConfigRecord.partitionerParams), - getKeySchema(clusterName, storeName).getSchema()); - } catch (PartitionerSchemaMismatchException e) { - String errorMessage = errorMessagePrefix + e.getMessage(); - LOGGER.error(errorMessage); - throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_SCHEMA); - } catch (Exception e) { - String errorMessage = errorMessagePrefix + "Partitioner Configs invalid, please verify that partitioner " - + "configs like classpath and parameters are correct!"; - LOGGER.error(errorMessage); - throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); - } setStore.partitionerConfig = partitionerConfigRecord; } - setStore.enableReads = - readability.map(addToUpdatedConfigList(updatedConfigsList, ENABLE_READS)).orElseGet(currStore::isEnableReads); - setStore.enableWrites = writeability.map(addToUpdatedConfigList(updatedConfigsList, ENABLE_WRITES)) - .orElseGet(currStore::isEnableWrites); - - setStore.readQuotaInCU = readQuotaInCU.map(addToUpdatedConfigList(updatedConfigsList, READ_QUOTA_IN_CU)) - .orElseGet(currStore::getReadQuotaInCU); - - // We need to be careful when handling currentVersion. - // Since it is not synced between parent and local controller, - // It is very likely to override local values unintentionally. 
- setStore.currentVersion = - currentVersion.map(addToUpdatedConfigList(updatedConfigsList, VERSION)).orElse(IGNORED_CURRENT_VERSION); - - hybridRewindSeconds.map(addToUpdatedConfigList(updatedConfigsList, REWIND_TIME_IN_SECONDS)); - hybridOffsetLagThreshold.map(addToUpdatedConfigList(updatedConfigsList, OFFSET_LAG_TO_GO_ONLINE)); - hybridTimeLagThreshold.map(addToUpdatedConfigList(updatedConfigsList, TIME_LAG_TO_GO_ONLINE)); - hybridDataReplicationPolicy.map(addToUpdatedConfigList(updatedConfigsList, DATA_REPLICATION_POLICY)); - hybridBufferReplayPolicy.map(addToUpdatedConfigList(updatedConfigsList, BUFFER_REPLAY_POLICY)); - HybridStoreConfig updatedHybridStoreConfig = VeniceHelixAdmin.mergeNewSettingsIntoOldHybridStoreConfig( - currStore, - hybridRewindSeconds, - hybridOffsetLagThreshold, - hybridTimeLagThreshold, - hybridDataReplicationPolicy, - hybridBufferReplayPolicy); - - // Get VeniceControllerClusterConfig for the cluster - VeniceControllerClusterConfig controllerConfig = - veniceHelixAdmin.getHelixVeniceClusterResources(clusterName).getConfig(); - // Check if the store is being converted to a hybrid store - boolean storeBeingConvertedToHybrid = !currStore.isHybrid() && updatedHybridStoreConfig != null - && veniceHelixAdmin.isHybrid(updatedHybridStoreConfig); - // Check if the store is being converted to a batch store - boolean storeBeingConvertedToBatch = currStore.isHybrid() && !veniceHelixAdmin.isHybrid(updatedHybridStoreConfig); - if (storeBeingConvertedToBatch && activeActiveReplicationEnabled.orElse(false)) { - throw new VeniceHttpException( - HttpStatus.SC_BAD_REQUEST, - "Cannot convert store to batch-only and enable Active/Active together.", - ErrorType.BAD_REQUEST); - } - if (storeBeingConvertedToBatch && incrementalPushEnabled.orElse(false)) { - throw new VeniceHttpException( - HttpStatus.SC_BAD_REQUEST, - "Cannot convert store to batch-only and enable incremental push together.", - ErrorType.BAD_REQUEST); - } - // Update active-active replication config. - setStore.activeActiveReplicationEnabled = activeActiveReplicationEnabled - .map(addToUpdatedConfigList(updatedConfigsList, ACTIVE_ACTIVE_REPLICATION_ENABLED)) - .orElseGet(currStore::isActiveActiveReplicationEnabled); - // Enable active-active replication automatically when batch user store being converted to hybrid store and - // active-active replication is enabled for all hybrid store via the cluster config - if (storeBeingConvertedToHybrid && !setStore.activeActiveReplicationEnabled && !currStore.isSystemStore() - && controllerConfig.isActiveActiveReplicationEnabledAsDefaultForHybrid()) { - setStore.activeActiveReplicationEnabled = true; - updatedConfigsList.add(ACTIVE_ACTIVE_REPLICATION_ENABLED); - } - // When turning off hybrid store, we will also turn off A/A store config. - if (storeBeingConvertedToBatch && setStore.activeActiveReplicationEnabled) { - setStore.activeActiveReplicationEnabled = false; - updatedConfigsList.add(ACTIVE_ACTIVE_REPLICATION_ENABLED); + if (updatedConfigs.contains(ENABLE_READS)) { + setStore.enableReads = updatedStore.isEnableReads(); + updatedConfigsList.add(ENABLE_READS); + } else { + setStore.enableReads = originalStore.isEnableReads(); } - // Update incremental push config. 
- setStore.incrementalPushEnabled = - incrementalPushEnabled.map(addToUpdatedConfigList(updatedConfigsList, INCREMENTAL_PUSH_ENABLED)) - .orElseGet(currStore::isIncrementalPushEnabled); - // Enable incremental push automatically when batch user store being converted to hybrid store and active-active - // replication is enabled or being and the cluster config allows it. - if (!setStore.incrementalPushEnabled && !currStore.isSystemStore() && storeBeingConvertedToHybrid - && setStore.activeActiveReplicationEnabled - && controllerConfig.enabledIncrementalPushForHybridActiveActiveUserStores()) { - setStore.incrementalPushEnabled = true; - updatedConfigsList.add(INCREMENTAL_PUSH_ENABLED); - } - // Enable separate real-time topic automatically when incremental push is enabled and cluster config allows it. - if (setStore.incrementalPushEnabled - && controllerConfig.enabledSeparateRealTimeTopicForStoreWithIncrementalPush()) { - setStore.separateRealTimeTopicEnabled = true; - updatedConfigsList.add(SEPARATE_REAL_TIME_TOPIC_ENABLED); + if (updatedConfigs.contains(ENABLE_WRITES)) { + setStore.enableWrites = updatedStore.isEnableWrites(); + updatedConfigsList.add(ENABLE_WRITES); + } else { + setStore.enableWrites = originalStore.isEnableWrites(); } - // When turning off hybrid store, we will also turn off incremental store config. - if (storeBeingConvertedToBatch && setStore.incrementalPushEnabled) { - setStore.incrementalPushEnabled = false; - updatedConfigsList.add(INCREMENTAL_PUSH_ENABLED); + if (updatedConfigs.contains(READ_QUOTA_IN_CU)) { + setStore.readQuotaInCU = updatedStore.getReadQuotaInCU(); + updatedConfigsList.add(READ_QUOTA_IN_CU); + } else { + setStore.readQuotaInCU = originalStore.getReadQuotaInCU(); } - if (updatedHybridStoreConfig == null) { - setStore.hybridStoreConfig = null; + if (updatedConfigs.contains(VERSION)) { + setStore.currentVersion = updatedStore.getCurrentVersion(); + updatedConfigsList.add(VERSION); } else { - HybridStoreConfigRecord hybridStoreConfigRecord = new HybridStoreConfigRecord(); - hybridStoreConfigRecord.offsetLagThresholdToGoOnline = - updatedHybridStoreConfig.getOffsetLagThresholdToGoOnline(); - hybridStoreConfigRecord.rewindTimeInSeconds = updatedHybridStoreConfig.getRewindTimeInSeconds(); - hybridStoreConfigRecord.producerTimestampLagThresholdToGoOnlineInSeconds = - updatedHybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds(); - hybridStoreConfigRecord.dataReplicationPolicy = updatedHybridStoreConfig.getDataReplicationPolicy().getValue(); - hybridStoreConfigRecord.bufferReplayPolicy = updatedHybridStoreConfig.getBufferReplayPolicy().getValue(); - setStore.hybridStoreConfig = hybridStoreConfigRecord; + setStore.currentVersion = IGNORED_CURRENT_VERSION; } - if (incrementalPushEnabled.orElse(currStore.isIncrementalPushEnabled()) - && !veniceHelixAdmin.isHybrid(currStore.getHybridStoreConfig()) - && !veniceHelixAdmin.isHybrid(updatedHybridStoreConfig)) { - LOGGER.info( - "Enabling incremental push for a batch store:{}. 
Converting it to a hybrid store with default configs.", - storeName); - HybridStoreConfigRecord hybridStoreConfigRecord = new HybridStoreConfigRecord(); - hybridStoreConfigRecord.rewindTimeInSeconds = DEFAULT_REWIND_TIME_IN_SECONDS; + if (updatedConfigs.contains(REWIND_TIME_IN_SECONDS)) { updatedConfigsList.add(REWIND_TIME_IN_SECONDS); - hybridStoreConfigRecord.offsetLagThresholdToGoOnline = DEFAULT_HYBRID_OFFSET_LAG_THRESHOLD; + } + + if (updatedConfigs.contains(OFFSET_LAG_TO_GO_ONLINE)) { updatedConfigsList.add(OFFSET_LAG_TO_GO_ONLINE); - hybridStoreConfigRecord.producerTimestampLagThresholdToGoOnlineInSeconds = DEFAULT_HYBRID_TIME_LAG_THRESHOLD; + } + + if (updatedConfigs.contains(TIME_LAG_TO_GO_ONLINE)) { updatedConfigsList.add(TIME_LAG_TO_GO_ONLINE); - hybridStoreConfigRecord.dataReplicationPolicy = DataReplicationPolicy.NON_AGGREGATE.getValue(); + } + + if (updatedConfigs.contains(DATA_REPLICATION_POLICY)) { updatedConfigsList.add(DATA_REPLICATION_POLICY); - hybridStoreConfigRecord.bufferReplayPolicy = BufferReplayPolicy.REWIND_FROM_EOP.getValue(); + } + + if (updatedConfigs.contains(BUFFER_REPLAY_POLICY)) { updatedConfigsList.add(BUFFER_REPLAY_POLICY); - setStore.hybridStoreConfig = hybridStoreConfigRecord; + } + + HybridStoreConfig updatedHybridStoreConfig = updatedStore.getHybridStoreConfig(); + setStore.hybridStoreConfig = new HybridStoreConfigRecord(); + if (updatedHybridStoreConfig == null) { + setStore.hybridStoreConfig.offsetLagThresholdToGoOnline = -1; + setStore.hybridStoreConfig.rewindTimeInSeconds = -1; + setStore.hybridStoreConfig.producerTimestampLagThresholdToGoOnlineInSeconds = -1; + setStore.hybridStoreConfig.dataReplicationPolicy = DataReplicationPolicy.NON_AGGREGATE.getValue(); + setStore.hybridStoreConfig.bufferReplayPolicy = BufferReplayPolicy.REWIND_FROM_EOP.getValue(); + } else { + setStore.hybridStoreConfig.offsetLagThresholdToGoOnline = + updatedHybridStoreConfig.getOffsetLagThresholdToGoOnline(); + setStore.hybridStoreConfig.rewindTimeInSeconds = updatedHybridStoreConfig.getRewindTimeInSeconds(); + setStore.hybridStoreConfig.producerTimestampLagThresholdToGoOnlineInSeconds = + updatedHybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds(); + setStore.hybridStoreConfig.dataReplicationPolicy = + updatedHybridStoreConfig.getDataReplicationPolicy().getValue(); + setStore.hybridStoreConfig.bufferReplayPolicy = updatedHybridStoreConfig.getBufferReplayPolicy().getValue(); + } + + if (updatedConfigs.contains(ACTIVE_ACTIVE_REPLICATION_ENABLED)) { + setStore.activeActiveReplicationEnabled = updatedStore.isActiveActiveReplicationEnabled(); + updatedConfigsList.add(ACTIVE_ACTIVE_REPLICATION_ENABLED); + } else { + setStore.activeActiveReplicationEnabled = originalStore.isActiveActiveReplicationEnabled(); + } + + if (updatedConfigs.contains(INCREMENTAL_PUSH_ENABLED)) { + setStore.incrementalPushEnabled = updatedStore.isIncrementalPushEnabled(); + updatedConfigsList.add(INCREMENTAL_PUSH_ENABLED); + } else { + setStore.incrementalPushEnabled = originalStore.isIncrementalPushEnabled(); + } + + if (updatedConfigs.contains(SEPARATE_REAL_TIME_TOPIC_ENABLED)) { + setStore.separateRealTimeTopicEnabled = updatedStore.isSeparateRealTimeTopicEnabled(); + updatedConfigsList.add(SEPARATE_REAL_TIME_TOPIC_ENABLED); + } else { + setStore.separateRealTimeTopicEnabled = originalStore.isSeparateRealTimeTopicEnabled(); + } + + if (updatedConfigs.contains(NEARLINE_PRODUCER_COMPRESSION_ENABLED)) { + setStore.nearlineProducerCompressionEnabled = 
updatedStore.isNearlineProducerCompressionEnabled(); + updatedConfigsList.add(NEARLINE_PRODUCER_COMPRESSION_ENABLED); + } else { + setStore.nearlineProducerCompressionEnabled = originalStore.isNearlineProducerCompressionEnabled(); + } + + if (updatedConfigs.contains(NEARLINE_PRODUCER_COUNT_PER_WRITER)) { + setStore.nearlineProducerCountPerWriter = updatedStore.getNearlineProducerCountPerWriter(); + updatedConfigsList.add(NEARLINE_PRODUCER_COUNT_PER_WRITER); + } else { + setStore.nearlineProducerCountPerWriter = originalStore.getNearlineProducerCountPerWriter(); } /** @@ -2527,219 +2370,255 @@ public void updateStore(String clusterName, String storeName, UpdateStoreQueryPa * do append-only and compaction will happen later. * We expose actual disk usage to users, instead of multiplying/dividing the overhead ratio by situations. */ - setStore.storageQuotaInByte = - storageQuotaInByte.map(addToUpdatedConfigList(updatedConfigsList, STORAGE_QUOTA_IN_BYTE)) - .orElseGet(currStore::getStorageQuotaInByte); - - setStore.accessControlled = accessControlled.map(addToUpdatedConfigList(updatedConfigsList, ACCESS_CONTROLLED)) - .orElseGet(currStore::isAccessControlled); - setStore.compressionStrategy = - compressionStrategy.map(addToUpdatedConfigList(updatedConfigsList, COMPRESSION_STRATEGY)) - .map(CompressionStrategy::getValue) - .orElse(currStore.getCompressionStrategy().getValue()); - setStore.clientDecompressionEnabled = - clientDecompressionEnabled.map(addToUpdatedConfigList(updatedConfigsList, CLIENT_DECOMPRESSION_ENABLED)) - .orElseGet(currStore::getClientDecompressionEnabled); - setStore.batchGetLimit = batchGetLimit.map(addToUpdatedConfigList(updatedConfigsList, BATCH_GET_LIMIT)) - .orElseGet(currStore::getBatchGetLimit); - setStore.numVersionsToPreserve = - numVersionsToPreserve.map(addToUpdatedConfigList(updatedConfigsList, NUM_VERSIONS_TO_PRESERVE)) - .orElseGet(currStore::getNumVersionsToPreserve); - setStore.isMigrating = storeMigration.map(addToUpdatedConfigList(updatedConfigsList, STORE_MIGRATION)) - .orElseGet(currStore::isMigrating); - setStore.replicationMetadataVersionID = replicationMetadataVersionID - .map(addToUpdatedConfigList(updatedConfigsList, REPLICATION_METADATA_PROTOCOL_VERSION_ID)) - .orElse(currStore.getRmdVersion()); - setStore.readComputationEnabled = - readComputationEnabled.map(addToUpdatedConfigList(updatedConfigsList, READ_COMPUTATION_ENABLED)) - .orElseGet(currStore::isReadComputationEnabled); - setStore.bootstrapToOnlineTimeoutInHours = bootstrapToOnlineTimeoutInHours - .map(addToUpdatedConfigList(updatedConfigsList, BOOTSTRAP_TO_ONLINE_TIMEOUT_IN_HOURS)) - .orElseGet(currStore::getBootstrapToOnlineTimeoutInHours); + if (updatedConfigs.contains(STORAGE_QUOTA_IN_BYTE)) { + setStore.storageQuotaInByte = updatedStore.getStorageQuotaInByte(); + updatedConfigsList.add(STORAGE_QUOTA_IN_BYTE); + } else { + setStore.storageQuotaInByte = originalStore.getStorageQuotaInByte(); + } + + if (updatedConfigs.contains(ACCESS_CONTROLLED)) { + setStore.accessControlled = updatedStore.isAccessControlled(); + updatedConfigsList.add(ACCESS_CONTROLLED); + } else { + setStore.accessControlled = originalStore.isAccessControlled(); + } + + if (updatedConfigs.contains(COMPRESSION_STRATEGY)) { + setStore.compressionStrategy = updatedStore.getCompressionStrategy().getValue(); + updatedConfigsList.add(COMPRESSION_STRATEGY); + } else { + setStore.compressionStrategy = originalStore.getCompressionStrategy().getValue(); + } + + if (updatedConfigs.contains(CLIENT_DECOMPRESSION_ENABLED)) { + 
setStore.clientDecompressionEnabled = updatedStore.getClientDecompressionEnabled(); + updatedConfigsList.add(CLIENT_DECOMPRESSION_ENABLED); + } else { + setStore.clientDecompressionEnabled = originalStore.getClientDecompressionEnabled(); + } + + if (updatedConfigs.contains(BATCH_GET_LIMIT)) { + setStore.batchGetLimit = updatedStore.getBatchGetLimit(); + updatedConfigsList.add(BATCH_GET_LIMIT); + } else { + setStore.batchGetLimit = originalStore.getBatchGetLimit(); + } + + if (updatedConfigs.contains(NUM_VERSIONS_TO_PRESERVE)) { + setStore.numVersionsToPreserve = updatedStore.getNumVersionsToPreserve(); + updatedConfigsList.add(NUM_VERSIONS_TO_PRESERVE); + } else { + setStore.numVersionsToPreserve = originalStore.getNumVersionsToPreserve(); + } + + if (updatedConfigs.contains(STORE_MIGRATION)) { + setStore.isMigrating = updatedStore.isMigrating(); + updatedConfigsList.add(STORE_MIGRATION); + } else { + setStore.isMigrating = originalStore.isMigrating(); + } + + if (updatedConfigs.contains(REPLICATION_METADATA_PROTOCOL_VERSION_ID)) { + setStore.replicationMetadataVersionID = updatedStore.getRmdVersion(); + updatedConfigsList.add(REPLICATION_METADATA_PROTOCOL_VERSION_ID); + } else { + setStore.replicationMetadataVersionID = originalStore.getRmdVersion(); + } + + if (updatedConfigs.contains(READ_COMPUTATION_ENABLED)) { + setStore.readComputationEnabled = updatedStore.isReadComputationEnabled(); + updatedConfigsList.add(READ_COMPUTATION_ENABLED); + } else { + setStore.readComputationEnabled = originalStore.isReadComputationEnabled(); + } + + if (updatedConfigs.contains(BOOTSTRAP_TO_ONLINE_TIMEOUT_IN_HOURS)) { + setStore.bootstrapToOnlineTimeoutInHours = updatedStore.getBootstrapToOnlineTimeoutInHours(); + updatedConfigsList.add(BOOTSTRAP_TO_ONLINE_TIMEOUT_IN_HOURS); + } else { + setStore.bootstrapToOnlineTimeoutInHours = originalStore.getBootstrapToOnlineTimeoutInHours(); + } + setStore.leaderFollowerModelEnabled = true; // do not mess up during upgrades - setStore.backupStrategy = (backupStrategy.map(addToUpdatedConfigList(updatedConfigsList, BACKUP_STRATEGY)) - .orElse(currStore.getBackupStrategy())).ordinal(); - - setStore.schemaAutoRegisterFromPushJobEnabled = autoSchemaRegisterPushJobEnabled - .map(addToUpdatedConfigList(updatedConfigsList, AUTO_SCHEMA_REGISTER_FOR_PUSHJOB_ENABLED)) - .orElse(currStore.isSchemaAutoRegisterFromPushJobEnabled()); - - setStore.hybridStoreDiskQuotaEnabled = - hybridStoreDiskQuotaEnabled.map(addToUpdatedConfigList(updatedConfigsList, HYBRID_STORE_DISK_QUOTA_ENABLED)) - .orElse(currStore.isHybridStoreDiskQuotaEnabled()); - - regularVersionETLEnabled.map(addToUpdatedConfigList(updatedConfigsList, REGULAR_VERSION_ETL_ENABLED)); - futureVersionETLEnabled.map(addToUpdatedConfigList(updatedConfigsList, FUTURE_VERSION_ETL_ENABLED)); - etledUserProxyAccount.map(addToUpdatedConfigList(updatedConfigsList, ETLED_PROXY_USER_ACCOUNT)); - setStore.ETLStoreConfig = mergeNewSettingIntoOldETLStoreConfig( - currStore, - regularVersionETLEnabled, - futureVersionETLEnabled, - etledUserProxyAccount); - - setStore.largestUsedVersionNumber = - largestUsedVersionNumber.map(addToUpdatedConfigList(updatedConfigsList, LARGEST_USED_VERSION_NUMBER)) - .orElseGet(currStore::getLargestUsedVersionNumber); - - setStore.backupVersionRetentionMs = - backupVersionRetentionMs.map(addToUpdatedConfigList(updatedConfigsList, BACKUP_VERSION_RETENTION_MS)) - .orElseGet(currStore::getBackupVersionRetentionMs); - setStore.replicationFactor = 
replicationFactor.map(addToUpdatedConfigList(updatedConfigsList, REPLICATION_FACTOR)) - .orElseGet(currStore::getReplicationFactor); - setStore.migrationDuplicateStore = - migrationDuplicateStore.map(addToUpdatedConfigList(updatedConfigsList, MIGRATION_DUPLICATE_STORE)) - .orElseGet(currStore::isMigrationDuplicateStore); - setStore.nativeReplicationSourceFabric = nativeReplicationSourceFabric - .map(addToUpdatedConfigList(updatedConfigsList, NATIVE_REPLICATION_SOURCE_FABRIC)) - .orElseGet((currStore::getNativeReplicationSourceFabric)); - - setStore.disableMetaStore = - params.disableMetaStore().map(addToUpdatedConfigList(updatedConfigsList, DISABLE_META_STORE)).orElse(false); - - setStore.disableDavinciPushStatusStore = params.disableDavinciPushStatusStore() - .map(addToUpdatedConfigList(updatedConfigsList, DISABLE_DAVINCI_PUSH_STATUS_STORE)) - .orElse(false); - - setStore.storagePersona = personaName.map(addToUpdatedConfigList(updatedConfigsList, PERSONA_NAME)).orElse(null); - - setStore.blobTransferEnabled = params.getBlobTransferEnabled() - .map(addToUpdatedConfigList(updatedConfigsList, BLOB_TRANSFER_ENABLED)) - .orElseGet(currStore::isBlobTransferEnabled); - - setStore.separateRealTimeTopicEnabled = - separateRealTimeTopicEnabled.map(addToUpdatedConfigList(updatedConfigsList, SEPARATE_REAL_TIME_TOPIC_ENABLED)) - .orElseGet(currStore::isSeparateRealTimeTopicEnabled); - - setStore.nearlineProducerCompressionEnabled = params.getNearlineProducerCompressionEnabled() - .map(addToUpdatedConfigList(updatedConfigsList, NEARLINE_PRODUCER_COMPRESSION_ENABLED)) - .orElseGet(currStore::isNearlineProducerCompressionEnabled); - - setStore.nearlineProducerCountPerWriter = params.getNearlineProducerCountPerWriter() - .map(addToUpdatedConfigList(updatedConfigsList, NEARLINE_PRODUCER_COUNT_PER_WRITER)) - .orElseGet(currStore::getNearlineProducerCountPerWriter); - - // Check whether the passed param is valid or not - if (latestSupersetSchemaId.isPresent()) { - if (latestSupersetSchemaId.get() != SchemaData.INVALID_VALUE_SCHEMA_ID) { - if (veniceHelixAdmin.getValueSchema(clusterName, storeName, latestSupersetSchemaId.get()) == null) { - throw new VeniceException( - "Unknown value schema id: " + latestSupersetSchemaId.get() + " in store: " + storeName); - } - } + + if (updatedConfigs.contains(BACKUP_STRATEGY)) { + setStore.backupStrategy = updatedStore.getBackupStrategy().getValue(); + updatedConfigsList.add(BACKUP_STRATEGY); + } else { + setStore.backupStrategy = originalStore.getBackupStrategy().getValue(); } - setStore.latestSuperSetValueSchemaId = - latestSupersetSchemaId.map(addToUpdatedConfigList(updatedConfigsList, LATEST_SUPERSET_SCHEMA_ID)) - .orElseGet(currStore::getLatestSuperSetValueSchemaId); - setStore.storageNodeReadQuotaEnabled = - storageNodeReadQuotaEnabled.map(addToUpdatedConfigList(updatedConfigsList, STORAGE_NODE_READ_QUOTA_ENABLED)) - .orElseGet(currStore::isStorageNodeReadQuotaEnabled); - setStore.unusedSchemaDeletionEnabled = - unusedSchemaDeletionEnabled.map(addToUpdatedConfigList(updatedConfigsList, UNUSED_SCHEMA_DELETION_ENABLED)) - .orElseGet(currStore::isUnusedSchemaDeletionEnabled); - setStore.minCompactionLagSeconds = - minCompactionLagSeconds.map(addToUpdatedConfigList(updatedConfigsList, MIN_COMPACTION_LAG_SECONDS)) - .orElseGet(currStore::getMinCompactionLagSeconds); - setStore.maxCompactionLagSeconds = - maxCompactionLagSeconds.map(addToUpdatedConfigList(updatedConfigsList, MAX_COMPACTION_LAG_SECONDS)) - .orElseGet(currStore::getMaxCompactionLagSeconds); - if 
(setStore.maxCompactionLagSeconds < setStore.minCompactionLagSeconds) { - throw new VeniceException( - "Store's max compaction lag seconds: " + setStore.maxCompactionLagSeconds + " shouldn't be smaller than " - + "store's min compaction lag seconds: " + setStore.minCompactionLagSeconds); - } - setStore.maxRecordSizeBytes = - maxRecordSizeBytes.map(addToUpdatedConfigList(updatedConfigsList, MAX_RECORD_SIZE_BYTES)) - .orElseGet(currStore::getMaxRecordSizeBytes); - setStore.maxNearlineRecordSizeBytes = - maxNearlineRecordSizeBytes.map(addToUpdatedConfigList(updatedConfigsList, MAX_NEARLINE_RECORD_SIZE_BYTES)) - .orElseGet(currStore::getMaxNearlineRecordSizeBytes); - - StoragePersonaRepository repository = - getVeniceHelixAdmin().getHelixVeniceClusterResources(clusterName).getStoragePersonaRepository(); - StoragePersona personaToValidate = null; - StoragePersona existingPersona = repository.getPersonaContainingStore(currStore.getName()); - - if (params.getStoragePersona().isPresent()) { - personaToValidate = getVeniceHelixAdmin().getStoragePersona(clusterName, params.getStoragePersona().get()); - if (personaToValidate == null) { - String errMsg = "UpdateStore command failed for store " + storeName + ". The provided StoragePersona " - + params.getStoragePersona().get() + " does not exist."; - throw new VeniceException(errMsg); - } - } else if (existingPersona != null) { - personaToValidate = existingPersona; + + if (updatedConfigs.contains(AUTO_SCHEMA_REGISTER_FOR_PUSHJOB_ENABLED)) { + setStore.schemaAutoRegisterFromPushJobEnabled = updatedStore.isSchemaAutoRegisterFromPushJobEnabled(); + updatedConfigsList.add(AUTO_SCHEMA_REGISTER_FOR_PUSHJOB_ENABLED); + } else { + setStore.schemaAutoRegisterFromPushJobEnabled = originalStore.isSchemaAutoRegisterFromPushJobEnabled(); } - if (personaToValidate != null) { - /** - * Create a new copy of the store with an updated quota, and validate this. - */ - Store updatedQuotaStore = getVeniceHelixAdmin().getStore(clusterName, storeName); - updatedQuotaStore.setStorageQuotaInByte(setStore.getStorageQuotaInByte()); - repository.validateAddUpdatedStore(personaToValidate, Optional.of(updatedQuotaStore)); + if (updatedConfigs.contains(HYBRID_STORE_DISK_QUOTA_ENABLED)) { + setStore.hybridStoreDiskQuotaEnabled = updatedStore.isHybridStoreDiskQuotaEnabled(); + updatedConfigsList.add(HYBRID_STORE_DISK_QUOTA_ENABLED); + } else { + setStore.hybridStoreDiskQuotaEnabled = originalStore.isHybridStoreDiskQuotaEnabled(); } - /** - * Fabrics filter is not a store config, so we don't need to add it into {@link UpdateStore#updatedConfigsList} - */ - setStore.regionsFilter = regionsFilter.orElse(null); + if (updatedConfigs.contains(REGULAR_VERSION_ETL_ENABLED)) { + updatedConfigsList.add(REGULAR_VERSION_ETL_ENABLED); + } - // Update Partial Update config. 
- boolean partialUpdateConfigUpdated = ParentControllerConfigUpdateUtils.checkAndMaybeApplyPartialUpdateConfig( - this, - clusterName, - storeName, - writeComputationEnabled, - setStore, - storeBeingConvertedToHybrid); - if (partialUpdateConfigUpdated) { + if (updatedConfigs.contains(FUTURE_VERSION_ETL_ENABLED)) { + updatedConfigsList.add(FUTURE_VERSION_ETL_ENABLED); + } + + if (updatedConfigs.contains(ETLED_PROXY_USER_ACCOUNT)) { + updatedConfigsList.add(ETLED_PROXY_USER_ACCOUNT); + } + + ETLStoreConfig etlStoreConfig = updatedStore.getEtlStoreConfig(); + ETLStoreConfigRecord etlStoreConfigRecord = new ETLStoreConfigRecord(); + etlStoreConfigRecord.regularVersionETLEnabled = etlStoreConfig.isRegularVersionETLEnabled(); + etlStoreConfigRecord.futureVersionETLEnabled = etlStoreConfig.isFutureVersionETLEnabled(); + etlStoreConfigRecord.etledUserProxyAccount = etlStoreConfig.getEtledUserProxyAccount(); + setStore.ETLStoreConfig = etlStoreConfigRecord; + + if (updatedConfigs.contains(LARGEST_USED_VERSION_NUMBER)) { + setStore.largestUsedVersionNumber = updatedStore.getLargestUsedVersionNumber(); + updatedConfigsList.add(LARGEST_USED_VERSION_NUMBER); + } else { + setStore.largestUsedVersionNumber = originalStore.getLargestUsedVersionNumber(); + } + + if (updatedConfigs.contains(BACKUP_VERSION_RETENTION_MS)) { + setStore.backupVersionRetentionMs = updatedStore.getBackupVersionRetentionMs(); + updatedConfigsList.add(BACKUP_VERSION_RETENTION_MS); + } else { + setStore.backupVersionRetentionMs = originalStore.getBackupVersionRetentionMs(); + } + + if (updatedConfigs.contains(REPLICATION_FACTOR)) { + setStore.replicationFactor = updatedStore.getReplicationFactor(); + updatedConfigsList.add(REPLICATION_FACTOR); + } else { + setStore.replicationFactor = originalStore.getReplicationFactor(); + } + + if (updatedConfigs.contains(MIGRATION_DUPLICATE_STORE)) { + setStore.migrationDuplicateStore = updatedStore.isMigrationDuplicateStore(); + updatedConfigsList.add(MIGRATION_DUPLICATE_STORE); + } else { + setStore.migrationDuplicateStore = originalStore.isMigrationDuplicateStore(); + } + + if (updatedConfigs.contains(NATIVE_REPLICATION_SOURCE_FABRIC)) { + setStore.nativeReplicationSourceFabric = updatedStore.getNativeReplicationSourceFabric(); + updatedConfigsList.add(NATIVE_REPLICATION_SOURCE_FABRIC); + } else { + setStore.nativeReplicationSourceFabric = originalStore.getNativeReplicationSourceFabric(); + } + + if (updatedConfigs.contains(DISABLE_META_STORE)) { + setStore.disableMetaStore = !updatedStore.isStoreMetaSystemStoreEnabled(); + updatedConfigsList.add(DISABLE_META_STORE); + } else { + setStore.disableMetaStore = !originalStore.isStoreMetaSystemStoreEnabled(); + } + + if (updatedConfigs.contains(DISABLE_DAVINCI_PUSH_STATUS_STORE)) { + setStore.disableDavinciPushStatusStore = !updatedStore.isDaVinciPushStatusStoreEnabled(); + updatedConfigsList.add(DISABLE_DAVINCI_PUSH_STATUS_STORE); + } else { + setStore.disableDavinciPushStatusStore = !originalStore.isDaVinciPushStatusStoreEnabled(); + } + + if (updatedConfigs.contains(PERSONA_NAME)) { + setStore.storagePersona = params.getStoragePersona().get(); + updatedConfigsList.add(PERSONA_NAME); + } else { + setStore.storagePersona = null; + } + + if (updatedConfigs.contains(BLOB_TRANSFER_ENABLED)) { + setStore.blobTransferEnabled = updatedStore.isBlobTransferEnabled(); + updatedConfigsList.add(BLOB_TRANSFER_ENABLED); + } else { + setStore.blobTransferEnabled = originalStore.isBlobTransferEnabled(); + } + + if (updatedConfigs.contains(MAX_RECORD_SIZE_BYTES)) 
{ + setStore.maxRecordSizeBytes = updatedStore.getMaxRecordSizeBytes(); + updatedConfigsList.add(MAX_RECORD_SIZE_BYTES); + } else { + setStore.maxRecordSizeBytes = originalStore.getMaxRecordSizeBytes(); + } + + if (updatedConfigs.contains(MAX_NEARLINE_RECORD_SIZE_BYTES)) { + setStore.maxNearlineRecordSizeBytes = updatedStore.getMaxNearlineRecordSizeBytes(); + updatedConfigsList.add(MAX_NEARLINE_RECORD_SIZE_BYTES); + } else { + setStore.maxNearlineRecordSizeBytes = originalStore.getMaxNearlineRecordSizeBytes(); + } + + if (updatedConfigs.contains(LATEST_SUPERSET_SCHEMA_ID)) { + setStore.latestSuperSetValueSchemaId = updatedStore.getLatestSuperSetValueSchemaId(); + updatedConfigsList.add(LATEST_SUPERSET_SCHEMA_ID); + } else { + setStore.latestSuperSetValueSchemaId = originalStore.getLatestSuperSetValueSchemaId(); + } + + if (updatedConfigs.contains(STORAGE_NODE_READ_QUOTA_ENABLED)) { + setStore.storageNodeReadQuotaEnabled = updatedStore.isStorageNodeReadQuotaEnabled(); + updatedConfigsList.add(STORAGE_NODE_READ_QUOTA_ENABLED); + } else { + setStore.storageNodeReadQuotaEnabled = originalStore.isStorageNodeReadQuotaEnabled(); + } + + if (updatedConfigs.contains(UNUSED_SCHEMA_DELETION_ENABLED)) { + setStore.unusedSchemaDeletionEnabled = updatedStore.isUnusedSchemaDeletionEnabled(); + updatedConfigsList.add(UNUSED_SCHEMA_DELETION_ENABLED); + } else { + setStore.unusedSchemaDeletionEnabled = originalStore.isUnusedSchemaDeletionEnabled(); + } + + if (updatedConfigs.contains(MIN_COMPACTION_LAG_SECONDS)) { + setStore.minCompactionLagSeconds = updatedStore.getMinCompactionLagSeconds(); + updatedConfigsList.add(MIN_COMPACTION_LAG_SECONDS); + } else { + setStore.minCompactionLagSeconds = originalStore.getMinCompactionLagSeconds(); + } + + if (updatedConfigs.contains(MAX_COMPACTION_LAG_SECONDS)) { + setStore.maxCompactionLagSeconds = updatedStore.getMaxCompactionLagSeconds(); + updatedConfigsList.add(MAX_COMPACTION_LAG_SECONDS); + } else { + setStore.maxCompactionLagSeconds = originalStore.getMaxCompactionLagSeconds(); + } + + if (updatedConfigs.contains(WRITE_COMPUTATION_ENABLED)) { + setStore.writeComputationEnabled = updatedStore.isWriteComputationEnabled(); updatedConfigsList.add(WRITE_COMPUTATION_ENABLED); + } else { + setStore.writeComputationEnabled = originalStore.isWriteComputationEnabled(); } - boolean partialUpdateJustEnabled = setStore.writeComputationEnabled && !currStore.isWriteComputationEnabled(); - // Update Chunking config. - boolean chunkingConfigUpdated = ParentControllerConfigUpdateUtils - .checkAndMaybeApplyChunkingConfigChange(this, clusterName, storeName, chunkingEnabled, setStore); - if (chunkingConfigUpdated) { + + if (updatedConfigs.contains(CHUNKING_ENABLED)) { + setStore.chunkingEnabled = updatedStore.isChunkingEnabled(); updatedConfigsList.add(CHUNKING_ENABLED); + } else { + setStore.chunkingEnabled = originalStore.isChunkingEnabled(); } - // Update RMD Chunking config. - boolean rmdChunkingConfigUpdated = ParentControllerConfigUpdateUtils - .checkAndMaybeApplyRmdChunkingConfigChange(this, clusterName, storeName, rmdChunkingEnabled, setStore); - if (rmdChunkingConfigUpdated) { + if (updatedConfigs.contains(RMD_CHUNKING_ENABLED)) { + setStore.rmdChunkingEnabled = updatedStore.isRmdChunkingEnabled(); updatedConfigsList.add(RMD_CHUNKING_ENABLED); + } else { + setStore.rmdChunkingEnabled = originalStore.isRmdChunkingEnabled(); } - // Validate Amplification Factor config based on latest A/A and partial update status. 
- if ((setStore.getActiveActiveReplicationEnabled() || setStore.getWriteComputationEnabled()) - && updatedPartitionerConfig.getAmplificationFactor() > 1) { - throw new VeniceHttpException( - HttpStatus.SC_BAD_REQUEST, - "Non-default amplification factor is not compatible with active-active replication and/or partial update.", - ErrorType.BAD_REQUEST); - } - - if (!getVeniceHelixAdmin().isHybrid(currStore.getHybridStoreConfig()) - && getVeniceHelixAdmin().isHybrid(setStore.getHybridStoreConfig()) && setStore.getPartitionNum() == 0) { - // This is a new hybrid store and partition count is not specified. - VeniceControllerClusterConfig config = - getVeniceHelixAdmin().getHelixVeniceClusterResources(clusterName).getConfig(); - setStore.setPartitionNum( - PartitionUtils.calculatePartitionCount( - storeName, - setStore.getStorageQuotaInByte(), - 0, - config.getPartitionSize(), - config.getMinNumberOfPartitionsForHybrid(), - config.getMaxNumberOfPartitions(), - config.isPartitionCountRoundUpEnabled(), - config.getPartitionCountRoundUpSize())); - LOGGER.info( - "Enforcing default hybrid partition count:{} for a new hybrid store:{}.", - setStore.getPartitionNum(), - storeName); - updatedConfigsList.add(PARTITION_COUNT); - } + /** + * Fabrics filter is not a store config, so we don't need to add it into {@link UpdateStore#updatedConfigsList} + */ + setStore.regionsFilter = params.getRegionsFilter().orElse(null); /** * By default, parent controllers will not try to replicate the unchanged store configs to child controllers; @@ -2747,124 +2626,30 @@ && getVeniceHelixAdmin().isHybrid(setStore.getHybridStoreConfig()) && setStore.g */ setStore.replicateAllConfigs = replicateAllConfigs; if (!replicateAllConfigs) { - if (updatedConfigsList.isEmpty()) { - String errMsg = - "UpdateStore command failed for store " + storeName + ". The command didn't change any specific" - + " store config and didn't specify \"--replicate-all-configs\" flag."; - LOGGER.error(errMsg); - throw new VeniceException(errMsg); - } setStore.updatedConfigsList = new ArrayList<>(updatedConfigsList); } else { setStore.updatedConfigsList = Collections.emptyList(); } - final boolean readComputeJustEnabled = - readComputationEnabled.orElse(false) && !currStore.isReadComputationEnabled(); - boolean needToGenerateSupersetSchema = - !currStore.isSystemStore() && (readComputeJustEnabled || partialUpdateJustEnabled); - if (needToGenerateSupersetSchema) { - // dry run to make sure superset schema generation can work - getSupersetSchemaGenerator(clusterName) - .generateSupersetSchemaFromSchemas(getValueSchemas(clusterName, storeName)); - } - AdminOperation message = new AdminOperation(); message.operationType = AdminMessageType.UPDATE_STORE.getValue(); message.payloadUnion = setStore; sendAdminMessageAndWaitForConsumed(clusterName, storeName, message); - - if (needToGenerateSupersetSchema) { - addSupersetSchemaForStore(clusterName, storeName, currStore.isActiveActiveReplicationEnabled()); - } - if (partialUpdateJustEnabled) { - LOGGER.info("Enabling partial update for the first time on store: {} in cluster: {}", storeName, clusterName); - addUpdateSchemaForStore(this, clusterName, storeName, false); - } - - /** - * If active-active replication is getting enabled for the store, generate and register the Replication metadata schema - * for all existing value schemas. 
- */ - final boolean activeActiveReplicationJustEnabled = - activeActiveReplicationEnabled.orElse(false) && !currStore.isActiveActiveReplicationEnabled(); - if (activeActiveReplicationJustEnabled) { - updateReplicationMetadataSchemaForAllValueSchema(clusterName, storeName); - } + UpdateStoreUtils.handlePostUpdateActions(this, clusterName, storeName); } finally { releaseAdminMessageLock(clusterName, storeName); } } - private Map validateAndDecorateStoreViewConfigs(Map stringMap, Store store) { - Map configs = StoreViewUtils.convertStringMapViewToViewConfigMap(stringMap); - Map validatedConfigs = new HashMap<>(); - for (Map.Entry viewConfigEntry: configs.entrySet()) { - ViewConfig validatedViewConfig = - validateAndDecorateStoreViewConfig(store, viewConfigEntry.getValue(), viewConfigEntry.getKey()); - validatedConfigs.put(viewConfigEntry.getKey(), validatedViewConfig); - } - return validatedConfigs; - } - - private ViewConfig validateAndDecorateStoreViewConfig(Store store, ViewConfig viewConfig, String viewName) { - // TODO: Pass a proper properties object here. Today this isn't used in this context - if (viewConfig.getViewClassName().equals(MaterializedView.class.getCanonicalName())) { - if (viewName.contains(VERSION_SEPARATOR)) { - throw new VeniceException( - String.format("Materialized View name cannot contain version separator: %s", VERSION_SEPARATOR)); - } - Map viewParams = viewConfig.getViewParameters(); - viewParams.put(ViewParameterKeys.MATERIALIZED_VIEW_NAME.name(), viewName); - if (!viewParams.containsKey(ViewParameterKeys.MATERIALIZED_VIEW_PARTITIONER.name())) { - viewParams.put( - ViewParameterKeys.MATERIALIZED_VIEW_PARTITIONER.name(), - store.getPartitionerConfig().getPartitionerClass()); - if (!store.getPartitionerConfig().getPartitionerParams().isEmpty()) { - try { - viewParams.put( - ViewParameterKeys.MATERIALIZED_VIEW_PARTITIONER_PARAMS.name(), - ObjectMapperFactory.getInstance() - .writeValueAsString(store.getPartitionerConfig().getPartitionerParams())); - } catch (JsonProcessingException e) { - throw new VeniceException("Failed to convert store partitioner params to string", e); - } - } - } - if (!viewParams.containsKey(ViewParameterKeys.MATERIALIZED_VIEW_PARTITION_COUNT.name())) { - viewParams.put( - ViewParameterKeys.MATERIALIZED_VIEW_PARTITION_COUNT.name(), - Integer.toString(store.getPartitionCount())); - } - viewConfig.setViewParameters(viewParams); - } - VeniceView view = - ViewUtils.getVeniceView(viewConfig.getViewClassName(), new Properties(), store, viewConfig.getViewParameters()); - view.validateConfigs(); - return viewConfig; - } - - private SupersetSchemaGenerator getSupersetSchemaGenerator(String clusterName) { - if (externalSupersetSchemaGenerator.isPresent() && getMultiClusterConfigs().getControllerConfig(clusterName) - .isParentExternalSupersetSchemaGenerationEnabled()) { + @Override + public SupersetSchemaGenerator getSupersetSchemaGenerator(String clusterName) { + if (externalSupersetSchemaGenerator.isPresent() + && getMultiClusterConfigs().getControllerConfig(clusterName).isExternalSupersetSchemaGenerationEnabled()) { return externalSupersetSchemaGenerator.get(); } return defaultSupersetSchemaGenerator; } - private void addSupersetSchemaForStore(String clusterName, String storeName, boolean activeActiveReplicationEnabled) { - // Generate a superset schema and add it. 
- SchemaEntry supersetSchemaEntry = getSupersetSchemaGenerator(clusterName) - .generateSupersetSchemaFromSchemas(getValueSchemas(clusterName, storeName)); - final Schema supersetSchema = supersetSchemaEntry.getSchema(); - final int supersetSchemaID = supersetSchemaEntry.getId(); - addValueSchemaEntry(clusterName, storeName, supersetSchema.toString(), supersetSchemaID, true); - - if (activeActiveReplicationEnabled) { - updateReplicationMetadataSchema(clusterName, storeName, supersetSchema, supersetSchemaID); - } - } - /** * @see VeniceHelixAdmin#updateClusterConfig(String, UpdateClusterConfigQueryParams) */ @@ -2873,28 +2658,6 @@ public void updateClusterConfig(String clusterName, UpdateClusterConfigQueryPara getVeniceHelixAdmin().updateClusterConfig(clusterName, params); } - private void validateActiveActiveReplicationEnableConfigs( - Optional activeActiveReplicationEnabledOptional, - Optional nativeReplicationEnabledOptional, - Store store) { - final boolean activeActiveReplicationEnabled = activeActiveReplicationEnabledOptional.orElse(false); - if (!activeActiveReplicationEnabled) { - return; - } - - final boolean nativeReplicationEnabled = nativeReplicationEnabledOptional.isPresent() - ? nativeReplicationEnabledOptional.get() - : store.isNativeReplicationEnabled(); - - if (!nativeReplicationEnabled) { - throw new VeniceHttpException( - HttpStatus.SC_BAD_REQUEST, - "Active/Active Replication cannot be enabled for store " + store.getName() - + " since Native Replication is not enabled on it.", - ErrorType.INVALID_CONFIG); - } - } - /** * @see VeniceHelixAdmin#getStorageEngineOverheadRatio(String) */ @@ -2986,102 +2749,11 @@ public SchemaEntry addValueSchema( } } - private SchemaEntry addValueAndSupersetSchemaEntries( - String clusterName, - String storeName, - SchemaEntry newValueSchemaEntry, - SchemaEntry newSupersetSchemaEntry, - final boolean isWriteComputationEnabled) { - validateNewSupersetAndValueSchemaEntries(storeName, clusterName, newValueSchemaEntry, newSupersetSchemaEntry); - LOGGER.info( - "Adding value schema {} and superset schema {} to store: {} in cluster: {}", - newValueSchemaEntry, - newSupersetSchemaEntry, - storeName, - clusterName); - - SupersetSchemaCreation supersetSchemaCreation = - (SupersetSchemaCreation) AdminMessageType.SUPERSET_SCHEMA_CREATION.getNewInstance(); - supersetSchemaCreation.clusterName = clusterName; - supersetSchemaCreation.storeName = storeName; - SchemaMeta valueSchemaMeta = new SchemaMeta(); - valueSchemaMeta.definition = newValueSchemaEntry.getSchemaStr(); - valueSchemaMeta.schemaType = SchemaType.AVRO_1_4.getValue(); - supersetSchemaCreation.valueSchema = valueSchemaMeta; - supersetSchemaCreation.valueSchemaId = newValueSchemaEntry.getId(); - - SchemaMeta supersetSchemaMeta = new SchemaMeta(); - supersetSchemaMeta.definition = newSupersetSchemaEntry.getSchemaStr(); - supersetSchemaMeta.schemaType = SchemaType.AVRO_1_4.getValue(); - supersetSchemaCreation.supersetSchema = supersetSchemaMeta; - supersetSchemaCreation.supersetSchemaId = newSupersetSchemaEntry.getId(); - - AdminOperation message = new AdminOperation(); - message.operationType = AdminMessageType.SUPERSET_SCHEMA_CREATION.getValue(); - message.payloadUnion = supersetSchemaCreation; - - sendAdminMessageAndWaitForConsumed(clusterName, storeName, message); - // Need to add RMD schemas for both new value schema and new superset schema. 
- updateReplicationMetadataSchema( - clusterName, - storeName, - newValueSchemaEntry.getSchema(), - newValueSchemaEntry.getId()); - updateReplicationMetadataSchema( - clusterName, - storeName, - newSupersetSchemaEntry.getSchema(), - newSupersetSchemaEntry.getId()); - if (isWriteComputationEnabled) { - Schema newValueWriteComputeSchema = - writeComputeSchemaConverter.convertFromValueRecordSchema(newValueSchemaEntry.getSchema()); - Schema newSuperSetWriteComputeSchema = - writeComputeSchemaConverter.convertFromValueRecordSchema(newSupersetSchemaEntry.getSchema()); - addDerivedSchema(clusterName, storeName, newValueSchemaEntry.getId(), newValueWriteComputeSchema.toString()); - addDerivedSchema( - clusterName, - storeName, - newSupersetSchemaEntry.getId(), - newSuperSetWriteComputeSchema.toString()); - } - updateStore( - clusterName, - storeName, - new UpdateStoreQueryParams().setLatestSupersetSchemaId(newSupersetSchemaEntry.getId())); - return newValueSchemaEntry; - } - - private void validateNewSupersetAndValueSchemaEntries( - String storeName, - String clusterName, - SchemaEntry newValueSchemaEntry, - SchemaEntry newSupersetSchemaEntry) { - if (newValueSchemaEntry.getId() == newSupersetSchemaEntry.getId()) { - throw new IllegalArgumentException( - String.format( - "Superset schema ID and value schema ID are expected to be different for store %s in cluster %s. " - + "Got ID: %d", - storeName, - clusterName, - newValueSchemaEntry.getId())); - } - if (AvroSchemaUtils - .compareSchemaIgnoreFieldOrder(newValueSchemaEntry.getSchema(), newSupersetSchemaEntry.getSchema())) { - throw new IllegalArgumentException( - String.format( - "Superset and value schemas are expected to be different for store %s in cluster %s. Got schema: %s", - storeName, - clusterName, - newValueSchemaEntry.getSchema())); - } - } - private SchemaEntry addValueSchemaEntry( String clusterName, String storeName, String valueSchemaStr, - final int newValueSchemaId, - final boolean doUpdateSupersetSchemaID) { + final int newValueSchemaId) { LOGGER.info("Adding value schema: {} to store: {} in cluster: {}", valueSchemaStr, storeName, clusterName); ValueSchemaCreation valueSchemaCreation = @@ -3107,25 +2779,71 @@ private SchemaEntry addValueSchemaEntry( + actualValueSchemaId); } - if (doUpdateSupersetSchemaID) { - updateStore(clusterName, storeName, new UpdateStoreQueryParams().setLatestSupersetSchemaId(newValueSchemaId)); - } - return new SchemaEntry(actualValueSchemaId, valueSchemaStr); } - /** - * Unsupported operation in the parent controller. 
- */ @Override - public SchemaEntry addSupersetSchema( + public void addSupersetSchema( String clusterName, String storeName, String valueSchemaStr, int valueSchemaId, String supersetSchemaStr, int supersetSchemaId) { - throw new VeniceUnsupportedOperationException("addSupersetSchema"); + acquireAdminMessageLock(clusterName, storeName); + try { + if (supersetSchemaId == SchemaData.INVALID_VALUE_SCHEMA_ID) { + throw new VeniceException("Invalid superset schema id: " + supersetSchemaId); + } + + ReadWriteSchemaRepository schemaRepository = getHelixVeniceClusterResources(clusterName).getSchemaRepository(); + final SchemaEntry existingSupersetSchemaEntry = schemaRepository.getValueSchema(storeName, supersetSchemaId); + if (existingSupersetSchemaEntry != null) { + final Schema newSupersetSchema = AvroSchemaParseUtils.parseSchemaFromJSONStrictValidation(supersetSchemaStr); + if (!AvroSchemaUtils + .compareSchemaIgnoreFieldOrder(existingSupersetSchemaEntry.getSchema(), newSupersetSchema)) { + throw new VeniceException( + "Existing schema with id " + existingSupersetSchemaEntry.getId() + " does not match with new schema " + + supersetSchemaStr); + } + } + + if (valueSchemaId == SchemaData.INVALID_VALUE_SCHEMA_ID) { + LOGGER.info( + "Adding superset schema {} with id {} to store: {} in cluster: {}", + supersetSchemaStr, + supersetSchemaId, + storeName, + clusterName); + valueSchemaStr = ""; + } else if (StringUtils.isEmpty(valueSchemaStr)) { + throw new VeniceException("Invalid value schema string: " + valueSchemaStr); + } + + SupersetSchemaCreation supersetSchemaCreation = + (SupersetSchemaCreation) AdminMessageType.SUPERSET_SCHEMA_CREATION.getNewInstance(); + supersetSchemaCreation.clusterName = clusterName; + supersetSchemaCreation.storeName = storeName; + SchemaMeta valueSchemaMeta = new SchemaMeta(); + valueSchemaMeta.definition = valueSchemaStr; + valueSchemaMeta.schemaType = SchemaType.AVRO_1_4.getValue(); + supersetSchemaCreation.valueSchema = valueSchemaMeta; + supersetSchemaCreation.valueSchemaId = valueSchemaId; + + SchemaMeta supersetSchemaMeta = new SchemaMeta(); + supersetSchemaMeta.definition = supersetSchemaStr; + supersetSchemaMeta.schemaType = SchemaType.AVRO_1_4.getValue(); + supersetSchemaCreation.supersetSchema = supersetSchemaMeta; + supersetSchemaCreation.supersetSchemaId = supersetSchemaId; + + AdminOperation message = new AdminOperation(); + message.operationType = AdminMessageType.SUPERSET_SCHEMA_CREATION.getValue(); + message.payloadUnion = supersetSchemaCreation; + + sendAdminMessageAndWaitForConsumed(clusterName, storeName, message); + } finally { + releaseAdminMessageLock(clusterName, storeName); + } } @Override @@ -3137,78 +2855,14 @@ public SchemaEntry addValueSchema( DirectionalSchemaCompatibilityType expectedCompatibilityType) { acquireAdminMessageLock(clusterName, storeName); try { - Schema newValueSchema = AvroSchemaParseUtils.parseSchemaFromJSONStrictValidation(newValueSchemaStr); - - final Store store = getVeniceHelixAdmin().getStore(clusterName, storeName); - Schema existingValueSchema = getVeniceHelixAdmin().getSupersetOrLatestValueSchema(clusterName, store); - - final boolean doUpdateSupersetSchemaID; - if (existingValueSchema != null && (store.isReadComputationEnabled() || store.isWriteComputationEnabled())) { - SupersetSchemaGenerator supersetSchemaGenerator = getSupersetSchemaGenerator(clusterName); - Schema newSuperSetSchema = supersetSchemaGenerator.generateSupersetSchema(existingValueSchema, newValueSchema); - String newSuperSetSchemaStr = 
newSuperSetSchema.toString(); - if (supersetSchemaGenerator.compareSchema(newSuperSetSchema, newValueSchema)) { - doUpdateSupersetSchemaID = true; - - } else if (supersetSchemaGenerator.compareSchema(newSuperSetSchema, existingValueSchema)) { - doUpdateSupersetSchemaID = false; - - } else if (store.isSystemStore()) { - /** - * Do not register superset schema for system store for now. Because some system stores specify the schema ID - * explicitly, which may conflict with the superset schema generated internally, the new value schema registration - * could fail. - * - * TODO: Design a long-term plan. - */ - doUpdateSupersetSchemaID = false; - - } else { - // Register superset schema only if it does not match with existing or new schema. - - // validate compatibility of the new superset schema - getVeniceHelixAdmin().checkPreConditionForAddValueSchemaAndGetNewSchemaId( - clusterName, - storeName, - newSuperSetSchemaStr, - expectedCompatibilityType); - // Check if the superset schema already exists or not. If exists use the same ID, else bump the value ID by - // one. - int supersetSchemaId = getVeniceHelixAdmin().getValueSchemaIdIgnoreFieldOrder( - clusterName, - storeName, - newSuperSetSchemaStr, - (s1, s2) -> supersetSchemaGenerator.compareSchema(s1, s2) ? 0 : 1); - if (supersetSchemaId == SchemaData.INVALID_VALUE_SCHEMA_ID) { - supersetSchemaId = schemaId + 1; - } - return addValueAndSupersetSchemaEntries( - clusterName, - storeName, - new SchemaEntry(schemaId, newValueSchema), - new SchemaEntry(supersetSchemaId, newSuperSetSchema), - store.isWriteComputationEnabled()); - } - } else { - doUpdateSupersetSchemaID = false; + if (schemaId == SchemaData.DUPLICATE_VALUE_SCHEMA_CODE) { + return new SchemaEntry(getValueSchemaId(clusterName, storeName, newValueSchemaStr), newValueSchemaStr); } - SchemaEntry addedSchemaEntry = - addValueSchemaEntry(clusterName, storeName, newValueSchemaStr, schemaId, doUpdateSupersetSchemaID); - /** - * if active-active replication is enabled for the store then generate and register the new Replication metadata schema - * for this newly added value schema. - */ - if (store.isActiveActiveReplicationEnabled()) { - Schema latestValueSchema = getVeniceHelixAdmin().getSupersetOrLatestValueSchema(clusterName, store); - final int valueSchemaId = getValueSchemaId(clusterName, storeName, latestValueSchema.toString()); - updateReplicationMetadataSchema(clusterName, storeName, latestValueSchema, valueSchemaId); - } - if (store.isWriteComputationEnabled()) { - Schema newWriteComputeSchema = - writeComputeSchemaConverter.convertFromValueRecordSchema(addedSchemaEntry.getSchema()); - addDerivedSchema(clusterName, storeName, addedSchemaEntry.getId(), newWriteComputeSchema.toString()); - } + SchemaEntry addedSchemaEntry = addValueSchemaEntry(clusterName, storeName, newValueSchemaStr, schemaId); + + // Now register all inferred schemas for the store. 
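+      // Per the new PrimaryControllerConfigUpdateUtils in this change, registerInferredSchemas derives and registers:
+      // the superset schema (when read/write compute is enabled or a superset schema already exists), partial update
+      // (derived) schemas (when write compute is enabled), and RMD schemas (when Active-Active replication is
+      // enabled). It skips stores for which inferred store updates are not allowed.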
+ PrimaryControllerConfigUpdateUtils.registerInferredSchemas(this, clusterName, storeName); return addedSchemaEntry; } finally { @@ -3334,8 +2988,8 @@ public RmdSchemaEntry addReplicationMetadataSchema( try { RmdSchemaEntry rmdSchemaEntry = new RmdSchemaEntry(valueSchemaId, replicationMetadataVersionId, replicationMetadataSchemaStr); - final boolean replicationMetadataSchemaAlreadyPresent = getVeniceHelixAdmin() - .checkIfMetadataSchemaAlreadyPresent(clusterName, storeName, valueSchemaId, rmdSchemaEntry); + final boolean replicationMetadataSchemaAlreadyPresent = + getVeniceHelixAdmin().checkIfMetadataSchemaAlreadyPresent(clusterName, storeName, rmdSchemaEntry); if (replicationMetadataSchemaAlreadyPresent) { LOGGER.info( "Replication metadata schema already exists for store: {} in cluster: {} metadataSchema: {} " @@ -3440,36 +3094,6 @@ public void validateAndMaybeRetrySystemStoreAutoCreation( throw new VeniceUnsupportedOperationException("validateAndMaybeRetrySystemStoreAutoCreation"); } - private void updateReplicationMetadataSchemaForAllValueSchema(String clusterName, String storeName) { - final Collection valueSchemas = getValueSchemas(clusterName, storeName); - for (SchemaEntry valueSchemaEntry: valueSchemas) { - updateReplicationMetadataSchema(clusterName, storeName, valueSchemaEntry.getSchema(), valueSchemaEntry.getId()); - } - } - - private void updateReplicationMetadataSchema( - String clusterName, - String storeName, - Schema valueSchema, - int valueSchemaId) { - final int rmdVersionId = getRmdVersionID(storeName, clusterName); - final boolean valueSchemaAlreadyHasRmdSchema = getVeniceHelixAdmin() - .checkIfValueSchemaAlreadyHasRmdSchema(clusterName, storeName, valueSchemaId, rmdVersionId); - if (valueSchemaAlreadyHasRmdSchema) { - LOGGER.info( - "Store {} in cluster {} already has a replication metadata schema for its value schema with ID {} and " - + "replication metadata version ID {}. So skip updating this value schema's RMD schema.", - storeName, - clusterName, - valueSchemaId, - rmdVersionId); - return; - } - String replicationMetadataSchemaStr = - RmdSchemaGenerator.generateMetadataSchema(valueSchema, rmdVersionId).toString(); - addReplicationMetadataSchema(clusterName, storeName, valueSchemaId, rmdVersionId, replicationMetadataSchemaStr); - } - /** * Unsupported operation in the parent controller. */ @@ -3916,7 +3540,7 @@ public NodeRemovableResult isInstanceRemovable( */ @Override public Pair> nodeReplicaReadiness(String cluster, String helixNodeId) { - throw new VeniceUnsupportedOperationException("nodeReplicaReadiness is not supported"); + throw new VeniceUnsupportedOperationException("nodeReplicaReadiness"); } private StoreInfo getStoreInChildRegion(String regionName, String clusterName, String storeName) { @@ -4559,36 +4183,6 @@ public StoreMetaValue getMetaStoreValue(StoreMetaKey metaKey, String storeName) throw new VeniceException("Not implemented in parent"); } - /** - * Check if etled proxy account is set before enabling any ETL and return a {@link ETLStoreConfigRecord} - */ - private ETLStoreConfigRecord mergeNewSettingIntoOldETLStoreConfig( - Store store, - Optional regularVersionETLEnabled, - Optional futureVersionETLEnabled, - Optional etledUserProxyAccount) { - ETLStoreConfig etlStoreConfig = store.getEtlStoreConfig(); - /** - * If etl enabled is true (either current version or future version), then account name must be specified in the command - * and it's not empty, or the store metadata already contains a non-empty account name. 
- */ - if (regularVersionETLEnabled.orElse(false) || futureVersionETLEnabled.orElse(false)) { - if ((!etledUserProxyAccount.isPresent() || etledUserProxyAccount.get().isEmpty()) - && (etlStoreConfig.getEtledUserProxyAccount() == null - || etlStoreConfig.getEtledUserProxyAccount().isEmpty())) { - throw new VeniceException("Cannot enable ETL for this store because etled user proxy account is not set"); - } - } - ETLStoreConfigRecord etlStoreConfigRecord = new ETLStoreConfigRecord(); - etlStoreConfigRecord.etledUserProxyAccount = - etledUserProxyAccount.orElse(etlStoreConfig.getEtledUserProxyAccount()); - etlStoreConfigRecord.regularVersionETLEnabled = - regularVersionETLEnabled.orElse(etlStoreConfig.isRegularVersionETLEnabled()); - etlStoreConfigRecord.futureVersionETLEnabled = - futureVersionETLEnabled.orElse(etlStoreConfig.isFutureVersionETLEnabled()); - return etlStoreConfigRecord; - } - /** * This parses the input accessPermission string to create ACL's and provision them using the authorizerService interface. * @@ -4739,7 +4333,7 @@ public void updateAclForStore(String clusterName, String storeName, String acces try (AutoCloseableLock ignore = resources.getClusterLockManager().createStoreWriteLock(storeName)) { LOGGER.info("ACLProvisioning: UpdateAcl for store: {} in cluster: {}", storeName, clusterName); if (!authorizerService.isPresent()) { - throw new VeniceUnsupportedOperationException("updateAclForStore is not supported yet!"); + throw new VeniceUnsupportedOperationException("updateAclForStore"); } Store store = getVeniceHelixAdmin().checkPreConditionForAclOp(clusterName, storeName); provisionAclsForStore( @@ -4759,7 +4353,7 @@ public void updateSystemStoreAclForStore( HelixVeniceClusterResources resources = getVeniceHelixAdmin().getHelixVeniceClusterResources(clusterName); try (AutoCloseableLock ignore = resources.getClusterLockManager().createStoreWriteLock(regularStoreName)) { if (!authorizerService.isPresent()) { - throw new VeniceUnsupportedOperationException("updateAclForStore is not supported yet!"); + throw new VeniceUnsupportedOperationException("updateAclForStore"); } getVeniceHelixAdmin().checkPreConditionForAclOp(clusterName, regularStoreName); authorizerService.get().setAcls(systemStoreAclBinding); @@ -4775,7 +4369,7 @@ public String getAclForStore(String clusterName, String storeName) { try (AutoCloseableLock ignore = resources.getClusterLockManager().createStoreReadLock(storeName)) { LOGGER.info("ACLProvisioning: GetAcl for store: {} in cluster: {}", storeName, clusterName); if (!authorizerService.isPresent()) { - throw new VeniceUnsupportedOperationException("getAclForStore is not supported yet!"); + throw new VeniceUnsupportedOperationException("getAclForStore"); } getVeniceHelixAdmin().checkPreConditionForAclOp(clusterName, storeName); String accessPerms = fetchAclsForStore(storeName); @@ -4792,7 +4386,7 @@ public void deleteAclForStore(String clusterName, String storeName) { try (AutoCloseableLock ignore = resources.getClusterLockManager().createStoreWriteLock(storeName)) { LOGGER.info("ACLProvisioning: DeleteAcl for store: {} in cluster: {}", storeName, clusterName); if (!authorizerService.isPresent()) { - throw new VeniceUnsupportedOperationException("deleteAclForStore is not supported yet!"); + throw new VeniceUnsupportedOperationException("deleteAclForStore"); } Store store = getVeniceHelixAdmin().checkPreConditionForAclOp(clusterName, storeName); if (!store.isMigrating()) { @@ -4957,6 +4551,14 @@ public boolean isParent() { return 
getVeniceHelixAdmin().isParent(); } + /** + * @see Admin#isPrimary() + */ + @Override + public boolean isPrimary() { + return getVeniceHelixAdmin().isPrimary(); + } + /** * @see Admin#getParentControllerRegionState() */ @@ -5059,13 +4661,6 @@ public VeniceHelixAdmin getVeniceHelixAdmin() { return veniceHelixAdmin; } - private Function addToUpdatedConfigList(List updatedConfigList, String config) { - return (configValue) -> { - updatedConfigList.add(config); - return configValue; - }; - } - /** * @see Admin#getBackupVersionDefaultRetentionMs() */ @@ -5332,7 +4927,8 @@ LingeringStoreVersionChecker getLingeringStoreVersionChecker() { return lingeringStoreVersionChecker; } - VeniceControllerMultiClusterConfig getMultiClusterConfigs() { + @Override + public VeniceControllerMultiClusterConfig getMultiClusterConfigs() { return multiClusterConfigs; } @@ -5384,6 +4980,13 @@ public void createStoragePersona( Set owners) { getVeniceHelixAdmin().checkControllerLeadershipFor(clusterName); + StoragePersonaRepository repository = + getVeniceHelixAdmin().getHelixVeniceClusterResources(clusterName).getStoragePersonaRepository(); + if (repository.hasPersona(name)) { + throw new VeniceException("Persona with name " + name + " already exists"); + } + repository.validatePersona(name, quotaNumber, storesToEnforce, owners); + CreateStoragePersona createStoragePersona = (CreateStoragePersona) AdminMessageType.CREATE_STORAGE_PERSONA.getNewInstance(); createStoragePersona.setClusterName(clusterName); @@ -5396,12 +4999,6 @@ public void createStoragePersona( message.operationType = AdminMessageType.CREATE_STORAGE_PERSONA.getValue(); message.payloadUnion = createStoragePersona; - StoragePersonaRepository repository = - getVeniceHelixAdmin().getHelixVeniceClusterResources(clusterName).getStoragePersonaRepository(); - if (repository.hasPersona(name)) { - throw new VeniceException("Persona with name " + name + " already exists"); - } - repository.validatePersona(name, quotaNumber, storesToEnforce, owners); sendAdminMessageAndWaitForConsumed(clusterName, null, message); } @@ -5438,6 +5035,11 @@ public void deleteStoragePersona(String clusterName, String name) { @Override public void updateStoragePersona(String clusterName, String name, UpdateStoragePersonaQueryParams queryParams) { getVeniceHelixAdmin().checkControllerLeadershipFor(clusterName); + + StoragePersonaRepository repository = + getVeniceHelixAdmin().getHelixVeniceClusterResources(clusterName).getStoragePersonaRepository(); + repository.validatePersonaUpdate(name, queryParams); + UpdateStoragePersona updateStoragePersona = (UpdateStoragePersona) AdminMessageType.UPDATE_STORAGE_PERSONA.getNewInstance(); updateStoragePersona.setClusterName(clusterName); @@ -5449,9 +5051,6 @@ public void updateStoragePersona(String clusterName, String name, UpdateStorageP message.operationType = AdminMessageType.UPDATE_STORAGE_PERSONA.getValue(); message.payloadUnion = updateStoragePersona; - StoragePersonaRepository repository = - getVeniceHelixAdmin().getHelixVeniceClusterResources(clusterName).getStoragePersonaRepository(); - repository.validatePersonaUpdate(name, queryParams); sendAdminMessageAndWaitForConsumed(clusterName, null, message); } diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/init/SystemSchemaInitializationRoutine.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/init/SystemSchemaInitializationRoutine.java index f3a179c730..de50543b28 100644 --- 
a/services/venice-controller/src/main/java/com/linkedin/venice/controller/init/SystemSchemaInitializationRoutine.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/init/SystemSchemaInitializationRoutine.java @@ -5,14 +5,13 @@ import com.linkedin.venice.VeniceConstants; import com.linkedin.venice.controller.VeniceControllerMultiClusterConfig; import com.linkedin.venice.controller.VeniceHelixAdmin; +import com.linkedin.venice.controller.util.PrimaryControllerConfigUpdateUtils; import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; import com.linkedin.venice.exceptions.VeniceException; import com.linkedin.venice.exceptions.VeniceNoStoreException; import com.linkedin.venice.meta.Store; -import com.linkedin.venice.schema.GeneratedSchemaID; import com.linkedin.venice.schema.SchemaEntry; import com.linkedin.venice.schema.avro.DirectionalSchemaCompatibilityType; -import com.linkedin.venice.schema.writecompute.WriteComputeSchemaConverter; import com.linkedin.venice.serialization.avro.AvroProtocolDefinition; import com.linkedin.venice.utils.Pair; import com.linkedin.venice.utils.Utils; @@ -34,13 +33,12 @@ public class SystemSchemaInitializationRoutine implements ClusterLeaderInitializ private final VeniceHelixAdmin admin; private final Optional keySchema; private final Optional storeMetadataUpdate; - private final boolean autoRegisterDerivedComputeSchema; public SystemSchemaInitializationRoutine( AvroProtocolDefinition protocolDefinition, VeniceControllerMultiClusterConfig multiClusterConfigs, VeniceHelixAdmin admin) { - this(protocolDefinition, multiClusterConfigs, admin, Optional.empty(), Optional.empty(), false); + this(protocolDefinition, multiClusterConfigs, admin, Optional.empty(), Optional.empty()); } public SystemSchemaInitializationRoutine( @@ -48,14 +46,12 @@ public SystemSchemaInitializationRoutine( VeniceControllerMultiClusterConfig multiClusterConfigs, VeniceHelixAdmin admin, Optional keySchema, - Optional storeMetadataUpdate, - boolean autoRegisterDerivedComputeSchema) { + Optional storeMetadataUpdate) { this.protocolDefinition = protocolDefinition; this.multiClusterConfigs = multiClusterConfigs; this.admin = admin; this.keySchema = keySchema; this.storeMetadataUpdate = storeMetadataUpdate; - this.autoRegisterDerivedComputeSchema = autoRegisterDerivedComputeSchema; } /** @@ -194,31 +190,13 @@ public void execute(String clusterToInit) { schemaInLocalResources.toString(true)); } } - if (autoRegisterDerivedComputeSchema) { - // Check and register Write Compute schema - String writeComputeSchema = - WriteComputeSchemaConverter.getInstance().convertFromValueRecordSchema(schemaInLocalResources).toString(); - GeneratedSchemaID derivedSchemaInfo = - admin.getDerivedSchemaId(clusterToInit, systemStoreName, writeComputeSchema); - if (!derivedSchemaInfo.isValid()) { - /** - * The derived schema doesn't exist right now, try to register it. - */ - try { - admin.addDerivedSchema(clusterToInit, systemStoreName, valueSchemaVersion, writeComputeSchema); - } catch (Exception e) { - LOGGER.error( - "Caught Exception when attempting to register the derived compute schema for '{}' schema version '{}'. 
Will bubble up.", - protocolDefinition.name(), - valueSchemaVersion, - e); - throw e; - } - LOGGER.info( - "Added the derived compute schema for the new schema v{} to system store '{}'.", - valueSchemaVersion, - systemStoreName); - } + + boolean writeComputationEnabled = + storeMetadataUpdate.map(params -> params.getWriteComputationEnabled().orElse(false)).orElse(false); + + if (writeComputationEnabled) { + // Register partial update schemas (aka derived schemas) + PrimaryControllerConfigUpdateUtils.addUpdateSchemaForStore(admin, clusterToInit, systemStoreName, false); } } } diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/init/SystemStoreInitializationHelper.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/init/SystemStoreInitializationHelper.java index 60b2589ef0..24ded137d3 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/init/SystemStoreInitializationHelper.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/init/SystemStoreInitializationHelper.java @@ -3,6 +3,7 @@ import com.linkedin.venice.VeniceConstants; import com.linkedin.venice.controller.Admin; import com.linkedin.venice.controller.VeniceControllerMultiClusterConfig; +import com.linkedin.venice.controller.util.PrimaryControllerConfigUpdateUtils; import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; import com.linkedin.venice.exceptions.VeniceException; import com.linkedin.venice.meta.Store; @@ -162,6 +163,11 @@ public static void setupSystemStore( } if (updateStoreQueryParams != null && updateStoreCheckSupplier.apply(store)) { + if (store.getPartitionCount() == 0 && !updateStoreQueryParams.getPartitionCount().isPresent()) { + updateStoreQueryParams + .setPartitionCount(multiClusterConfigs.getControllerConfig(clusterName).getMinNumberOfPartitions()); + } + admin.updateStore(clusterName, systemStoreName, updateStoreQueryParams); store = RetryUtils.executeWithMaxAttempt(() -> { @@ -173,16 +179,31 @@ public static void setupSystemStore( throw new VeniceException("Unable to update store " + systemStoreName); } + if (internalStore.getPartitionCount() == 0) { + throw new VeniceException("Partition count is still 0 after updating store " + systemStoreName); + } + return internalStore; }, 5, delayBetweenStoreUpdateRetries, Collections.singletonList(VeniceException.class)); LOGGER.info("Updated internal store " + systemStoreName + " in cluster " + clusterName); } + boolean activeActiveReplicationEnabled = false; + if (updateStoreQueryParams != null) { + activeActiveReplicationEnabled = updateStoreQueryParams.getActiveActiveReplicationEnabled().orElse(false); + } + + if (activeActiveReplicationEnabled) { + // Now that store has enabled A/A and all value schemas are registered, register RMD schemas + PrimaryControllerConfigUpdateUtils + .updateReplicationMetadataSchemaForAllValueSchema(admin, clusterName, systemStoreName); + } + long onlineVersionCount = store.getVersions().stream().filter(version -> version.getStatus() == VersionStatus.ONLINE).count(); if (onlineVersionCount == 0) { - int partitionCount = multiClusterConfigs.getControllerConfig(clusterName).getMinNumberOfPartitions(); + int partitionCount = store.getPartitionCount(); int replicationFactor = admin.getReplicationFactor(clusterName, systemStoreName); Version version = admin.incrementVersionIdempotent( clusterName, diff --git 
a/services/venice-controller/src/main/java/com/linkedin/venice/controller/kafka/consumer/AdminConsumptionTask.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/kafka/consumer/AdminConsumptionTask.java index 4b7299f7f2..1ee373f592 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/kafka/consumer/AdminConsumptionTask.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/kafka/consumer/AdminConsumptionTask.java @@ -323,6 +323,8 @@ public void run() { while (isRunning.get()) { try { Utils.sleep(READ_CYCLE_DELAY_MS); + // We don't really need to check if the controller is the leader for the cluster here, because it is checked in + // isAdminTopicConsumptionEnabled. However, we still check it here because it helps in testing. if (!admin.isLeaderControllerFor(clusterName) || !admin.isAdminTopicConsumptionEnabled(clusterName)) { unSubscribe(); continue; diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/kafka/consumer/AdminExecutionTask.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/kafka/consumer/AdminExecutionTask.java index 4ffa4d1b4c..39744c4450 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/kafka/consumer/AdminExecutionTask.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/kafka/consumer/AdminExecutionTask.java @@ -426,8 +426,6 @@ private void handleSetStoreCurrentVersion(SetStoreCurrentVersion message) { String storeName = message.storeName.toString(); int version = message.currentVersion; admin.setStoreCurrentVersion(clusterName, storeName, version); - - LOGGER.info("Set store: {} version to {} in cluster: {}", storeName, version, clusterName); } private void handleSetStoreOwner(SetStoreOwner message) { diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/supersetschema/SupersetSchemaGeneratorWithCustomProp.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/supersetschema/SupersetSchemaGeneratorWithCustomProp.java index f29fcd612c..edbe4eadc9 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/supersetschema/SupersetSchemaGeneratorWithCustomProp.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/supersetschema/SupersetSchemaGeneratorWithCustomProp.java @@ -1,10 +1,13 @@ package com.linkedin.venice.controller.supersetschema; +import com.linkedin.avroutil1.compatibility.AvroCompatibilityHelper; import com.linkedin.venice.schema.AvroSchemaParseUtils; import com.linkedin.venice.schema.SchemaEntry; import com.linkedin.venice.utils.AvroSchemaUtils; import com.linkedin.venice.utils.AvroSupersetSchemaUtils; +import java.util.ArrayList; import java.util.Collection; +import java.util.List; import org.apache.avro.Schema; @@ -41,12 +44,30 @@ public SchemaEntry generateSupersetSchemaFromSchemas(Collection sch * Check whether the latest value schema contains {@link #customProp} or not. 
*/ String customPropInLatestValueSchema = latestValueSchemaEntry.getSchema().getProp(customProp); - if (customPropInLatestValueSchema != null && supersetSchemaEntry.getSchema().getProp(customProp) == null) { + Schema existingSupersetSchema = supersetSchemaEntry.getSchema(); + if (customPropInLatestValueSchema != null + && !customPropInLatestValueSchema.equals(existingSupersetSchema.getProp(customProp))) { + List existingSupersetSchemaFields = existingSupersetSchema.getFields(); + List fieldList = new ArrayList<>(existingSupersetSchemaFields.size()); + for (Schema.Field field: existingSupersetSchemaFields) { + fieldList.add(AvroCompatibilityHelper.newField(field).build()); + } + Schema newSupersetSchema = Schema.createRecord( + existingSupersetSchema.getName(), + existingSupersetSchema.getDoc(), + existingSupersetSchema.getNamespace(), + existingSupersetSchema.isError(), + fieldList); + /** - * The 'supersetSchemaEntry' can contain a different custom prop value than the latest value schema, and - * custom prop value is not mutable. + * Custom props are not mutable, hence we need to copy all the existing props to the new schema */ - Schema newSupersetSchema = supersetSchemaEntry.clone().getSchema(); + AvroCompatibilityHelper.getAllPropNames(existingSupersetSchema).forEach(prop -> { + if (!prop.equals(customProp)) { + newSupersetSchema.addProp(prop, existingSupersetSchema.getProp(prop)); + } + }); + // Not empty, then copy it to the superset schema newSupersetSchema.addProp(customProp, customPropInLatestValueSchema); // Check whether this new schema exists or not diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/AdminUtils.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/AdminUtils.java new file mode 100644 index 0000000000..5546bd2ddf --- /dev/null +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/AdminUtils.java @@ -0,0 +1,113 @@ +package com.linkedin.venice.controller.util; + +import com.linkedin.venice.ConfigConstants; +import com.linkedin.venice.controller.Admin; +import com.linkedin.venice.controller.VeniceControllerClusterConfig; +import com.linkedin.venice.controller.kafka.protocol.admin.HybridStoreConfigRecord; +import com.linkedin.venice.exceptions.VeniceException; +import com.linkedin.venice.meta.BufferReplayPolicy; +import com.linkedin.venice.meta.DataReplicationPolicy; +import com.linkedin.venice.meta.HybridStoreConfig; +import com.linkedin.venice.meta.HybridStoreConfigImpl; +import com.linkedin.venice.meta.Store; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + + +public class AdminUtils { + private static final Logger LOGGER = LogManager.getLogger(AdminUtils.class); + + private AdminUtils() { + } + + public static boolean isHybrid(HybridStoreConfigRecord hybridStoreConfigRecord) { + HybridStoreConfig hybridStoreConfig = null; + if (hybridStoreConfigRecord != null) { + hybridStoreConfig = new HybridStoreConfigImpl( + hybridStoreConfigRecord.rewindTimeInSeconds, + hybridStoreConfigRecord.offsetLagThresholdToGoOnline, + hybridStoreConfigRecord.producerTimestampLagThresholdToGoOnlineInSeconds, + DataReplicationPolicy.valueOf(hybridStoreConfigRecord.dataReplicationPolicy), + BufferReplayPolicy.valueOf(hybridStoreConfigRecord.bufferReplayPolicy)); + } + return isHybrid(hybridStoreConfig); + } + + /** + * A store is not hybrid in the following two scenarios: + * If hybridStoreConfig is null, it means store is not hybrid. 
+ * If all the hybrid config values are negative, it indicates that the store is being set back to batch-only store. + */ + public static boolean isHybrid(HybridStoreConfig hybridStoreConfig) { + return hybridStoreConfig != null && hybridStoreConfig.isHybrid(); + } + + public static int getRmdVersionID(Admin admin, String storeName, String clusterName) { + final Store store = admin.getStore(clusterName, storeName); + if (store == null) { + LOGGER.warn( + "No store found in the store repository. Will get store-level RMD version ID from cluster config. " + + "Store name: {}, cluster: {}", + storeName, + clusterName); + } else if (store.getRmdVersion() == ConfigConstants.UNSPECIFIED_REPLICATION_METADATA_VERSION) { + LOGGER.info("No store-level RMD version ID found for store {} in cluster {}", storeName, clusterName); + } else { + LOGGER.info( + "Found store-level RMD version ID {} for store {} in cluster {}", + store.getRmdVersion(), + storeName, + clusterName); + return store.getRmdVersion(); + } + + final VeniceControllerClusterConfig controllerClusterConfig = + admin.getMultiClusterConfigs().getControllerConfig(clusterName); + if (controllerClusterConfig == null) { + throw new VeniceException("No controller cluster config found for cluster " + clusterName); + } + final int rmdVersionID = controllerClusterConfig.getReplicationMetadataVersion(); + LOGGER.info("Use RMD version ID {} for cluster {}", rmdVersionID, clusterName); + return rmdVersionID; + } + + /** + * Check if a store can support incremental pushes based on other configs. The following rules define when incremental + * push is allowed: + *

    + *
+   * 1. If the system is running in single-region mode, the store must be hybrid.
+   * 2. If the system is running in multi-region mode, one of the following must hold:
+   *    a. Hybrid + Active-Active
+   *    b. Hybrid + !Active-Active + {@link DataReplicationPolicy} is {@link DataReplicationPolicy#AGGREGATE}
+   *    c. Hybrid + !Active-Active + {@link DataReplicationPolicy} is {@link DataReplicationPolicy#NONE}
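+   * For example, in multi-region mode a hybrid store without Active-Active replication and with any other
+   * {@link DataReplicationPolicy} cannot use incremental push.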
      + * @param multiRegion whether the system is running in multi-region mode + * @param hybridStoreConfig The hybrid store config after applying all updates + * @return {@code true} if incremental push is allowed, {@code false} otherwise + */ + public static boolean isIncrementalPushSupported( + boolean multiRegion, + boolean activeActiveReplicationEnabled, + HybridStoreConfig hybridStoreConfig) { + // Only hybrid stores can support incremental push + if (!AdminUtils.isHybrid(hybridStoreConfig)) { + return false; + } + + // If the system is running in multi-region mode, we need to validate the data replication policies + if (!multiRegion) { + return true; + } + + // A/A can always support incremental push + if (activeActiveReplicationEnabled) { + return true; + } + + DataReplicationPolicy dataReplicationPolicy = hybridStoreConfig.getDataReplicationPolicy(); + return dataReplicationPolicy == DataReplicationPolicy.AGGREGATE + || dataReplicationPolicy == DataReplicationPolicy.NONE; + } +} diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/ParentControllerConfigUpdateUtils.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/ParentControllerConfigUpdateUtils.java deleted file mode 100644 index cbd1cc9241..0000000000 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/ParentControllerConfigUpdateUtils.java +++ /dev/null @@ -1,171 +0,0 @@ -package com.linkedin.venice.controller.util; - -import com.linkedin.venice.controller.VeniceControllerClusterConfig; -import com.linkedin.venice.controller.VeniceParentHelixAdmin; -import com.linkedin.venice.controller.kafka.protocol.admin.UpdateStore; -import com.linkedin.venice.exceptions.VeniceException; -import com.linkedin.venice.meta.Store; -import com.linkedin.venice.schema.SchemaEntry; -import com.linkedin.venice.schema.writecompute.WriteComputeSchemaConverter; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Comparator; -import java.util.List; -import java.util.Optional; -import org.apache.avro.Schema; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - - -/** - * This class is a utility class for Parent Controller store update logics. - * The method here aims to take in current status and request params to determine if certain feature is updated / should - * be updated based on some customized logics. - */ -public class ParentControllerConfigUpdateUtils { - public static final Logger LOGGER = LogManager.getLogger(ParentControllerConfigUpdateUtils.class); - public static final WriteComputeSchemaConverter updateSchemaConverter = WriteComputeSchemaConverter.getInstance(); - - /** - * This method takes in current status and request and try to determine whether to change partial update config. - * The check logic is: - * Step (1): If there is explict request, we will respect the request and maybe update config if new request value is - * different from existing config value. In this step, if we are enabling partial update, we will also perform a dry - * run to validate schema. If validation fails, it will throw exception and fail the whole request. - * Step (2): If there is NO explict request and store is being converted into hybrid store, we will check the cluster - * config and store's latest A/A config to see whether we should by default enable partial update. If so, we will also - * perform a dry on to validate schema. 
If validation fails, it will swallow the exception and log warning message. It - * will not turn on partial update and will not fail the whole request. - */ - public static boolean checkAndMaybeApplyPartialUpdateConfig( - VeniceParentHelixAdmin parentHelixAdmin, - String clusterName, - String storeName, - Optional partialUpdateRequest, - UpdateStore setStore, - boolean storeBeingConvertedToHybrid) { - Store currentStore = parentHelixAdmin.getVeniceHelixAdmin().getStore(clusterName, storeName); - VeniceControllerClusterConfig controllerConfig = - parentHelixAdmin.getVeniceHelixAdmin().getHelixVeniceClusterResources(clusterName).getConfig(); - boolean partialUpdateConfigChanged = false; - setStore.writeComputationEnabled = currentStore.isWriteComputationEnabled(); - if (partialUpdateRequest.isPresent()) { - setStore.writeComputationEnabled = partialUpdateRequest.get(); - if (partialUpdateRequest.get() && !currentStore.isWriteComputationEnabled()) { - // Dry-run generating update schemas before sending admin messages to enable partial update because - // update schema generation may fail due to some reasons. If that happens, abort the store update process. - addUpdateSchemaForStore(parentHelixAdmin, clusterName, storeName, true); - } - // Explicit request to change partial update config has the highest priority. - return true; - } - /** - * If a store: - * (1) Is being converted to hybrid; - * (2) Is not partial update enabled for now; - * (3) Does not request to change partial update config; - * It means partial update is not enabled, and there is no explict intention to change it. In this case, we will - * check cluster default config based on the replication policy to determine whether to try to enable partial update. - */ - final boolean shouldEnablePartialUpdateBasedOnClusterConfig = - storeBeingConvertedToHybrid && (setStore.activeActiveReplicationEnabled - ? controllerConfig.isEnablePartialUpdateForHybridActiveActiveUserStores() - : controllerConfig.isEnablePartialUpdateForHybridNonActiveActiveUserStores()); - if (!currentStore.isWriteComputationEnabled() && shouldEnablePartialUpdateBasedOnClusterConfig) { - LOGGER.info("Controller will try to enable partial update based on cluster config for store: " + storeName); - /** - * When trying to turn on partial update based on cluster config, if schema generation failed, we will not fail the - * whole request, but just do NOT turn on partial update, as other config update should still be respected. - */ - try { - addUpdateSchemaForStore(parentHelixAdmin, clusterName, storeName, true); - setStore.writeComputationEnabled = true; - partialUpdateConfigChanged = true; - } catch (Exception e) { - LOGGER.warn( - "Caught exception when trying to enable partial update base on cluster config, will not enable partial update for store: " - + storeName, - e); - } - } - return partialUpdateConfigChanged; - } - - public static boolean checkAndMaybeApplyChunkingConfigChange( - VeniceParentHelixAdmin parentHelixAdmin, - String clusterName, - String storeName, - Optional chunkingRequest, - UpdateStore setStore) { - Store currentStore = parentHelixAdmin.getVeniceHelixAdmin().getStore(clusterName, storeName); - setStore.chunkingEnabled = currentStore.isChunkingEnabled(); - if (chunkingRequest.isPresent()) { - setStore.chunkingEnabled = chunkingRequest.get(); - // Explicit request to change chunking config has the highest priority. 
- return true; - } - // If partial update is just enabled, we will by default enable chunking, if no explict request to update chunking - // config. - if (!currentStore.isWriteComputationEnabled() && setStore.writeComputationEnabled - && !currentStore.isChunkingEnabled()) { - setStore.chunkingEnabled = true; - return true; - } - return false; - } - - public static boolean checkAndMaybeApplyRmdChunkingConfigChange( - VeniceParentHelixAdmin parentHelixAdmin, - String clusterName, - String storeName, - Optional rmdChunkingRequest, - UpdateStore setStore) { - Store currentStore = parentHelixAdmin.getVeniceHelixAdmin().getStore(clusterName, storeName); - setStore.rmdChunkingEnabled = currentStore.isRmdChunkingEnabled(); - if (rmdChunkingRequest.isPresent()) { - setStore.rmdChunkingEnabled = rmdChunkingRequest.get(); - // Explicit request to change RMD chunking config has the highest priority. - return true; - } - // If partial update is just enabled and A/A is enabled, we will by default enable RMD chunking, if no explict - // request to update RMD chunking config. - if (!currentStore.isWriteComputationEnabled() && setStore.writeComputationEnabled - && setStore.activeActiveReplicationEnabled && !currentStore.isRmdChunkingEnabled()) { - setStore.rmdChunkingEnabled = true; - return true; - } - return false; - } - - public static void addUpdateSchemaForStore( - VeniceParentHelixAdmin parentHelixAdmin, - String clusterName, - String storeName, - boolean dryRun) { - Collection valueSchemaEntries = parentHelixAdmin.getValueSchemas(clusterName, storeName); - List updateSchemaEntries = new ArrayList<>(valueSchemaEntries.size()); - int maxId = valueSchemaEntries.stream().map(SchemaEntry::getId).max(Comparator.naturalOrder()).get(); - for (SchemaEntry valueSchemaEntry: valueSchemaEntries) { - try { - Schema updateSchema = updateSchemaConverter.convertFromValueRecordSchema(valueSchemaEntry.getSchema()); - updateSchemaEntries.add(new SchemaEntry(valueSchemaEntry.getId(), updateSchema)); - } catch (Exception e) { - // Allow failure in update schema generation in all schema except the latest value schema - if (valueSchemaEntry.getId() == maxId) { - throw new VeniceException( - "For store " + storeName + " cannot generate update schema for value schema ID :" - + valueSchemaEntry.getId() + ", top level field probably missing defaults.", - e); - } - } - } - // Add update schemas only after all update schema generation succeeded. 
- if (dryRun) { - return; - } - for (SchemaEntry updateSchemaEntry: updateSchemaEntries) { - parentHelixAdmin - .addDerivedSchema(clusterName, storeName, updateSchemaEntry.getId(), updateSchemaEntry.getSchemaStr()); - } - } -} diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/PrimaryControllerConfigUpdateUtils.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/PrimaryControllerConfigUpdateUtils.java new file mode 100644 index 0000000000..cecc7a955e --- /dev/null +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/PrimaryControllerConfigUpdateUtils.java @@ -0,0 +1,170 @@ +package com.linkedin.venice.controller.util; + +import com.linkedin.venice.controller.Admin; +import com.linkedin.venice.controller.supersetschema.SupersetSchemaGenerator; +import com.linkedin.venice.exceptions.VeniceException; +import com.linkedin.venice.meta.Store; +import com.linkedin.venice.schema.SchemaData; +import com.linkedin.venice.schema.SchemaEntry; +import com.linkedin.venice.schema.rmd.RmdSchemaEntry; +import com.linkedin.venice.schema.rmd.RmdSchemaGenerator; +import com.linkedin.venice.schema.writecompute.WriteComputeSchemaConverter; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Comparator; +import java.util.List; +import org.apache.avro.Schema; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + + +/** + * This class is a utility class for Primary Controller store update logics. + * Primary controller is the Parent controller in a multi-region deployment, and it is the Child Controller in a single-region deployment. + * The method here aims to take in current status and request params to determine if certain feature is updated / should + * be updated based on some customized logics. + */ +public class PrimaryControllerConfigUpdateUtils { + public static final Logger LOGGER = LogManager.getLogger(PrimaryControllerConfigUpdateUtils.class); + public static final WriteComputeSchemaConverter UPDATE_SCHEMA_CONVERTER = WriteComputeSchemaConverter.getInstance(); + + /** + * A store can have various schemas that are inferred based on the store's other properties (store configs, existing schemas, etc) + * This function is expected to register all such inferred schemas and it should be invoked on updates to the store's + * configs or schemas. + * + * This should only be executed in the primary controller. In a multi-region mode, the child controller is expected to + * get these updates via the admin channel. + */ + public static void registerInferredSchemas(Admin admin, String clusterName, String storeName) { + if (!UpdateStoreUtils.isInferredStoreUpdateAllowed(admin, storeName)) { + return; + } + + Store store = admin.getStore(clusterName, storeName); + + /** + * Register new superset schemas if either of the following conditions are met: + * 1. There is an existing superset schema + * 2. Read computation is enabled + * 3. 
Write computation is enabled + */ + if (store.isReadComputationEnabled() || store.isWriteComputationEnabled() + || store.getLatestSuperSetValueSchemaId() != SchemaData.INVALID_VALUE_SCHEMA_ID) { + addSupersetSchemaForStore(admin, clusterName, store); + } + + if (store.isWriteComputationEnabled()) { + // Register partial update schemas (aka derived schemas) + addUpdateSchemaForStore(admin, clusterName, storeName, false); + } + + if (store.isActiveActiveReplicationEnabled()) { + // Register RMD schemas + updateReplicationMetadataSchemaForAllValueSchema(admin, clusterName, storeName); + } + } + + private static void addSupersetSchemaForStore(Admin admin, String clusterName, Store store) { + String storeName = store.getName(); + SupersetSchemaGenerator supersetSchemaGenerator = admin.getSupersetSchemaGenerator(clusterName); + SchemaEntry supersetSchemaEntry = + supersetSchemaGenerator.generateSupersetSchemaFromSchemas(admin.getValueSchemas(clusterName, storeName)); + admin.addSupersetSchema( + clusterName, + storeName, + null, + SchemaData.INVALID_VALUE_SCHEMA_ID, + supersetSchemaEntry.getSchemaStr(), + supersetSchemaEntry.getId()); + } + + public static void addUpdateSchemaForStore(Admin admin, String clusterName, String storeName, boolean dryRun) { + Collection valueSchemaEntries = admin.getValueSchemas(clusterName, storeName); + List updateSchemaEntries = new ArrayList<>(valueSchemaEntries.size()); + int maxId = valueSchemaEntries.stream().map(SchemaEntry::getId).max(Comparator.naturalOrder()).get(); + for (SchemaEntry valueSchemaEntry: valueSchemaEntries) { + try { + Schema updateSchema = UPDATE_SCHEMA_CONVERTER.convertFromValueRecordSchema(valueSchemaEntry.getSchema()); + updateSchemaEntries.add(new SchemaEntry(valueSchemaEntry.getId(), updateSchema)); + } catch (Exception e) { + // Allow failure in update schema generation in all schema except the latest value schema + if (valueSchemaEntry.getId() == maxId) { + throw new VeniceException( + "For store " + storeName + " cannot generate update schema for value schema ID :" + + valueSchemaEntry.getId() + ", top level field probably missing defaults.", + e); + } + } + } + // Add update schemas only after all update schema generation succeeded. + if (dryRun) { + return; + } + for (SchemaEntry updateSchemaEntry: updateSchemaEntries) { + admin.addDerivedSchema(clusterName, storeName, updateSchemaEntry.getId(), updateSchemaEntry.getSchemaStr()); + } + } + + public static void updateReplicationMetadataSchemaForAllValueSchema( + Admin admin, + String clusterName, + String storeName) { + final Collection valueSchemas = admin.getValueSchemas(clusterName, storeName); + for (SchemaEntry valueSchemaEntry: valueSchemas) { + updateReplicationMetadataSchema( + admin, + clusterName, + storeName, + valueSchemaEntry.getSchema(), + valueSchemaEntry.getId()); + } + } + + private static void updateReplicationMetadataSchema( + Admin admin, + String clusterName, + String storeName, + Schema valueSchema, + int valueSchemaId) { + final int rmdVersionId = AdminUtils.getRmdVersionID(admin, storeName, clusterName); + final boolean valueSchemaAlreadyHasRmdSchema = + checkIfValueSchemaAlreadyHasRmdSchema(admin, clusterName, storeName, valueSchemaId, rmdVersionId); + if (valueSchemaAlreadyHasRmdSchema) { + LOGGER.info( + "Store {} in cluster {} already has a replication metadata schema for its value schema with ID {} and " + + "replication metadata version ID {}. 
So skip updating this value schema's RMD schema.", + storeName, + clusterName, + valueSchemaId, + rmdVersionId); + return; + } + String replicationMetadataSchemaStr = + RmdSchemaGenerator.generateMetadataSchema(valueSchema, rmdVersionId).toString(); + admin.addReplicationMetadataSchema( + clusterName, + storeName, + valueSchemaId, + rmdVersionId, + replicationMetadataSchemaStr); + } + + private static boolean checkIfValueSchemaAlreadyHasRmdSchema( + Admin admin, + String clusterName, + String storeName, + final int valueSchemaID, + final int replicationMetadataVersionId) { + Collection schemaEntries = admin.getHelixVeniceClusterResources(clusterName) + .getSchemaRepository() + .getReplicationMetadataSchemas(storeName); + for (RmdSchemaEntry rmdSchemaEntry: schemaEntries) { + if (rmdSchemaEntry.getValueSchemaID() == valueSchemaID + && rmdSchemaEntry.getId() == replicationMetadataVersionId) { + return true; + } + } + return false; + } +} diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreUtils.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreUtils.java new file mode 100644 index 0000000000..f4bbda0969 --- /dev/null +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreUtils.java @@ -0,0 +1,1293 @@ +package com.linkedin.venice.controller.util; + +import static com.linkedin.venice.controllerapi.ControllerApiConstants.ACCESS_CONTROLLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.ACTIVE_ACTIVE_REPLICATION_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.AMPLIFICATION_FACTOR; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.AUTO_SCHEMA_REGISTER_FOR_PUSHJOB_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.BACKUP_STRATEGY; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.BACKUP_VERSION_RETENTION_MS; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.BATCH_GET_LIMIT; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.BLOB_TRANSFER_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.BOOTSTRAP_TO_ONLINE_TIMEOUT_IN_HOURS; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.BUFFER_REPLAY_POLICY; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.CHUNKING_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.CLIENT_DECOMPRESSION_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.COMPRESSION_STRATEGY; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.DATA_REPLICATION_POLICY; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.DISABLE_DAVINCI_PUSH_STATUS_STORE; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.DISABLE_META_STORE; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.ENABLE_READS; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.ENABLE_WRITES; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.ETLED_PROXY_USER_ACCOUNT; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.FUTURE_VERSION_ETL_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.HYBRID_STORE_DISK_QUOTA_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.INCREMENTAL_PUSH_ENABLED; +import static 
com.linkedin.venice.controllerapi.ControllerApiConstants.LARGEST_USED_VERSION_NUMBER; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.LATEST_SUPERSET_SCHEMA_ID; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.MAX_COMPACTION_LAG_SECONDS; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.MAX_NEARLINE_RECORD_SIZE_BYTES; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.MAX_RECORD_SIZE_BYTES; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.MIGRATION_DUPLICATE_STORE; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.MIN_COMPACTION_LAG_SECONDS; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.NATIVE_REPLICATION_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.NATIVE_REPLICATION_SOURCE_FABRIC; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.NEARLINE_PRODUCER_COMPRESSION_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.NEARLINE_PRODUCER_COUNT_PER_WRITER; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.NUM_VERSIONS_TO_PRESERVE; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.OFFSET_LAG_TO_GO_ONLINE; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.OWNER; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.PARTITIONER_CLASS; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.PARTITIONER_PARAMS; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.PARTITION_COUNT; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.PERSONA_NAME; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.PUSH_STREAM_SOURCE_ADDRESS; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.READ_COMPUTATION_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.READ_QUOTA_IN_CU; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.REGULAR_VERSION_ETL_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.REPLICATION_FACTOR; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.REPLICATION_METADATA_PROTOCOL_VERSION_ID; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.REWIND_TIME_IN_SECONDS; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.RMD_CHUNKING_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.SEPARATE_REAL_TIME_TOPIC_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.STORAGE_NODE_READ_QUOTA_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.STORAGE_QUOTA_IN_BYTE; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.STORE_MIGRATION; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.STORE_VIEW; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.TIME_LAG_TO_GO_ONLINE; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.UNUSED_SCHEMA_DELETION_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.VERSION; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.WRITE_COMPUTATION_ENABLED; +import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_HYBRID_OFFSET_LAG_THRESHOLD; +import static 
com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_HYBRID_TIME_LAG_THRESHOLD; +import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_REWIND_TIME_IN_SECONDS; +import static com.linkedin.venice.meta.Version.VERSION_SEPARATOR; +import static com.linkedin.venice.utils.RegionUtils.parseRegionsFilterList; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.linkedin.venice.common.VeniceSystemStoreUtils; +import com.linkedin.venice.compression.CompressionStrategy; +import com.linkedin.venice.controller.Admin; +import com.linkedin.venice.controller.HelixVeniceClusterResources; +import com.linkedin.venice.controller.StoreViewUtils; +import com.linkedin.venice.controller.VeniceControllerClusterConfig; +import com.linkedin.venice.controller.VeniceControllerMultiClusterConfig; +import com.linkedin.venice.controller.VeniceHelixAdmin; +import com.linkedin.venice.controller.VeniceParentHelixAdmin; +import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; +import com.linkedin.venice.exceptions.ErrorType; +import com.linkedin.venice.exceptions.PartitionerSchemaMismatchException; +import com.linkedin.venice.exceptions.VeniceException; +import com.linkedin.venice.exceptions.VeniceHttpException; +import com.linkedin.venice.exceptions.VeniceNoStoreException; +import com.linkedin.venice.helix.StoragePersonaRepository; +import com.linkedin.venice.helix.ZkRoutersClusterManager; +import com.linkedin.venice.meta.BackupStrategy; +import com.linkedin.venice.meta.BufferReplayPolicy; +import com.linkedin.venice.meta.DataReplicationPolicy; +import com.linkedin.venice.meta.ETLStoreConfig; +import com.linkedin.venice.meta.ETLStoreConfigImpl; +import com.linkedin.venice.meta.HybridStoreConfig; +import com.linkedin.venice.meta.HybridStoreConfigImpl; +import com.linkedin.venice.meta.PartitionerConfig; +import com.linkedin.venice.meta.PartitionerConfigImpl; +import com.linkedin.venice.meta.Store; +import com.linkedin.venice.meta.Version; +import com.linkedin.venice.meta.ViewConfig; +import com.linkedin.venice.meta.ViewConfigImpl; +import com.linkedin.venice.meta.ViewParameterKeys; +import com.linkedin.venice.persona.StoragePersona; +import com.linkedin.venice.pubsub.api.PubSubTopic; +import com.linkedin.venice.pubsub.manager.TopicManager; +import com.linkedin.venice.schema.SchemaData; +import com.linkedin.venice.utils.ObjectMapperFactory; +import com.linkedin.venice.utils.PartitionUtils; +import com.linkedin.venice.utils.VeniceProperties; +import com.linkedin.venice.views.MaterializedView; +import com.linkedin.venice.views.VeniceView; +import com.linkedin.venice.views.ViewUtils; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Properties; +import java.util.Set; +import java.util.function.Consumer; +import org.apache.commons.lang.StringUtils; +import org.apache.http.HttpStatus; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + + +public class UpdateStoreUtils { + private static final Logger LOGGER = LogManager.getLogger(UpdateStoreUtils.class); + + private UpdateStoreUtils() { + } + + public static UpdateStoreWrapper getStoreUpdate( + Admin admin, + String clusterName, + String storeName, + UpdateStoreQueryParams params, + boolean checkRegionFilter) { + VeniceControllerMultiClusterConfig multiClusterConfigs = admin.getMultiClusterConfigs(); + + // There are certain configs that are only allowed to be updated in child regions. 
We might still want the ability + // to update such configs in the parent region via the Admin tool for operational reasons. So, we allow such updates + // if the regions filter only specifies one region, which is the parent region. + boolean onlyParentRegionFilter = false; + + // Check whether the command affects this region. + if (params.getRegionsFilter().isPresent()) { + Set regionsFilter = parseRegionsFilterList(params.getRegionsFilter().get()); + if (checkRegionFilter && !regionsFilter.contains(multiClusterConfigs.getRegionName())) { + LOGGER.info( + "UpdateStore command will be skipped for store: {} in cluster: {}, because the region filter is {}" + + " which doesn't include the current region: {}", + storeName, + clusterName, + regionsFilter, + multiClusterConfigs.getRegionName()); + return null; + } + + if (admin.isParent() && regionsFilter.size() == 1) { + onlyParentRegionFilter = true; + } + } + + Store originalStore = admin.getStore(clusterName, storeName); + if (originalStore == null) { + throw new VeniceNoStoreException(storeName, clusterName); + } + + UpdateStoreWrapper updateStoreWrapper = new UpdateStoreWrapper(originalStore); + Set updatedConfigs = updateStoreWrapper.updatedConfigs; + Store updatedStore = updateStoreWrapper.updatedStore; + + Optional owner = params.getOwner(); + Optional readability = params.getEnableReads(); + Optional writeability = params.getEnableWrites(); + Optional partitionCount = params.getPartitionCount(); + Optional partitionerClass = params.getPartitionerClass(); + Optional> partitionerParams = params.getPartitionerParams(); + Optional amplificationFactor = params.getAmplificationFactor(); + Optional storageQuotaInByte = params.getStorageQuotaInByte(); + Optional readQuotaInCU = params.getReadQuotaInCU(); + Optional currentVersion = params.getCurrentVersion(); + Optional largestUsedVersionNumber = params.getLargestUsedVersionNumber(); + Optional hybridRewindSeconds = params.getHybridRewindSeconds(); + Optional hybridOffsetLagThreshold = params.getHybridOffsetLagThreshold(); + Optional hybridTimeLagThreshold = params.getHybridTimeLagThreshold(); + Optional hybridDataReplicationPolicy = params.getHybridDataReplicationPolicy(); + Optional hybridBufferReplayPolicy = params.getHybridBufferReplayPolicy(); + Optional accessControlled = params.getAccessControlled(); + Optional compressionStrategy = params.getCompressionStrategy(); + Optional clientDecompressionEnabled = params.getClientDecompressionEnabled(); + Optional chunkingEnabled = params.getChunkingEnabled(); + Optional rmdChunkingEnabled = params.getRmdChunkingEnabled(); + Optional batchGetLimit = params.getBatchGetLimit(); + Optional numVersionsToPreserve = params.getNumVersionsToPreserve(); + Optional incrementalPushEnabled = params.getIncrementalPushEnabled(); + Optional separateRealTimeTopicEnabled = params.getSeparateRealTimeTopicEnabled(); + Optional storeMigration = params.getStoreMigration(); + Optional writeComputationEnabled = params.getWriteComputationEnabled(); + Optional replicationMetadataVersionID = params.getReplicationMetadataVersionID(); + Optional readComputationEnabled = params.getReadComputationEnabled(); + Optional bootstrapToOnlineTimeoutInHours = params.getBootstrapToOnlineTimeoutInHours(); + Optional backupStrategy = params.getBackupStrategy(); + Optional autoSchemaRegisterPushJobEnabled = params.getAutoSchemaRegisterPushJobEnabled(); + Optional hybridStoreDiskQuotaEnabled = params.getHybridStoreDiskQuotaEnabled(); + Optional regularVersionETLEnabled = 
params.getRegularVersionETLEnabled(); + Optional futureVersionETLEnabled = params.getFutureVersionETLEnabled(); + Optional etledUserProxyAccount = params.getETLedProxyUserAccount(); + Optional nativeReplicationEnabled = params.getNativeReplicationEnabled(); + Optional pushStreamSourceAddress = params.getPushStreamSourceAddress(); + Optional backupVersionRetentionMs = params.getBackupVersionRetentionMs(); + Optional replicationFactor = params.getReplicationFactor(); + Optional migrationDuplicateStore = params.getMigrationDuplicateStore(); + Optional nativeReplicationSourceFabric = params.getNativeReplicationSourceFabric(); + Optional activeActiveReplicationEnabled = params.getActiveActiveReplicationEnabled(); + Optional personaName = params.getStoragePersona(); + Optional> storeViewConfig = params.getStoreViews(); + Optional viewName = params.getViewName(); + Optional viewClassName = params.getViewClassName(); + Optional> viewParams = params.getViewClassParams(); + Optional removeView = params.getDisableStoreView(); + Optional latestSupersetSchemaId = params.getLatestSupersetSchemaId(); + Optional storageNodeReadQuotaEnabled = params.getStorageNodeReadQuotaEnabled(); + Optional minCompactionLagSeconds = params.getMinCompactionLagSeconds(); + Optional maxCompactionLagSeconds = params.getMaxCompactionLagSeconds(); + Optional maxRecordSizeBytes = params.getMaxRecordSizeBytes(); + Optional maxNearlineRecordSizeBytes = params.getMaxNearlineRecordSizeBytes(); + Optional unusedSchemaDeletionEnabled = params.getUnusedSchemaDeletionEnabled(); + Optional blobTransferEnabled = params.getBlobTransferEnabled(); + Optional nearlineProducerCompressionEnabled = params.getNearlineProducerCompressionEnabled(); + Optional nearlineProducerCountPerWriter = params.getNearlineProducerCountPerWriter(); + + addToUpdatedConfigs(updatedConfigs, OWNER, owner, updatedStore::setOwner); + addToUpdatedConfigs(updatedConfigs, ENABLE_READS, readability, updatedStore::setEnableReads); + addToUpdatedConfigs(updatedConfigs, ENABLE_WRITES, writeability, updatedStore::setEnableWrites); + addToUpdatedConfigs(updatedConfigs, PARTITION_COUNT, partitionCount, updatedStore::setPartitionCount); + addToUpdatedConfigs( + updatedConfigs, + LARGEST_USED_VERSION_NUMBER, + largestUsedVersionNumber, + updatedStore::setLargestUsedVersionNumber); + addToUpdatedConfigs( + updatedConfigs, + BOOTSTRAP_TO_ONLINE_TIMEOUT_IN_HOURS, + bootstrapToOnlineTimeoutInHours, + updatedStore::setBootstrapToOnlineTimeoutInHours); + addToUpdatedConfigs(updatedConfigs, STORAGE_QUOTA_IN_BYTE, storageQuotaInByte, updatedStore::setStorageQuotaInByte); + addToUpdatedConfigs(updatedConfigs, READ_QUOTA_IN_CU, readQuotaInCU, updatedStore::setReadQuotaInCU); + addToUpdatedConfigs(updatedConfigs, ACCESS_CONTROLLED, accessControlled, updatedStore::setAccessControlled); + addToUpdatedConfigs( + updatedConfigs, + COMPRESSION_STRATEGY, + compressionStrategy, + updatedStore::setCompressionStrategy); + addToUpdatedConfigs( + updatedConfigs, + CLIENT_DECOMPRESSION_ENABLED, + clientDecompressionEnabled, + updatedStore::setClientDecompressionEnabled); + addToUpdatedConfigs(updatedConfigs, CHUNKING_ENABLED, chunkingEnabled, updatedStore::setChunkingEnabled); + addToUpdatedConfigs(updatedConfigs, RMD_CHUNKING_ENABLED, rmdChunkingEnabled, updatedStore::setRmdChunkingEnabled); + addToUpdatedConfigs(updatedConfigs, BATCH_GET_LIMIT, batchGetLimit, updatedStore::setBatchGetLimit); + addToUpdatedConfigs( + updatedConfigs, + NUM_VERSIONS_TO_PRESERVE, + numVersionsToPreserve, + 
updatedStore::setNumVersionsToPreserve); + addToUpdatedConfigs(updatedConfigs, REPLICATION_FACTOR, replicationFactor, updatedStore::setReplicationFactor); + addToUpdatedConfigs(updatedConfigs, STORE_MIGRATION, storeMigration, updatedStore::setMigrating); + addToUpdatedConfigs( + updatedConfigs, + MIGRATION_DUPLICATE_STORE, + migrationDuplicateStore, + updatedStore::setMigrationDuplicateStore); + addToUpdatedConfigs( + updatedConfigs, + WRITE_COMPUTATION_ENABLED, + writeComputationEnabled, + updatedStore::setWriteComputationEnabled); + addToUpdatedConfigs( + updatedConfigs, + REPLICATION_METADATA_PROTOCOL_VERSION_ID, + replicationMetadataVersionID, + updatedStore::setRmdVersion); + addToUpdatedConfigs( + updatedConfigs, + READ_COMPUTATION_ENABLED, + readComputationEnabled, + updatedStore::setReadComputationEnabled); + addToUpdatedConfigs( + updatedConfigs, + NATIVE_REPLICATION_ENABLED, + nativeReplicationEnabled, + updatedStore::setNativeReplicationEnabled); + addToUpdatedConfigs( + updatedConfigs, + ACTIVE_ACTIVE_REPLICATION_ENABLED, + activeActiveReplicationEnabled, + updatedStore::setActiveActiveReplicationEnabled); + addToUpdatedConfigs( + updatedConfigs, + PUSH_STREAM_SOURCE_ADDRESS, + pushStreamSourceAddress, + updatedStore::setPushStreamSourceAddress); + addToUpdatedConfigs(updatedConfigs, BACKUP_STRATEGY, backupStrategy, updatedStore::setBackupStrategy); + addToUpdatedConfigs( + updatedConfigs, + AUTO_SCHEMA_REGISTER_FOR_PUSHJOB_ENABLED, + autoSchemaRegisterPushJobEnabled, + updatedStore::setSchemaAutoRegisterFromPushJobEnabled); + addToUpdatedConfigs( + updatedConfigs, + HYBRID_STORE_DISK_QUOTA_ENABLED, + hybridStoreDiskQuotaEnabled, + updatedStore::setHybridStoreDiskQuotaEnabled); + addToUpdatedConfigs( + updatedConfigs, + BACKUP_VERSION_RETENTION_MS, + backupVersionRetentionMs, + updatedStore::setBackupVersionRetentionMs); + addToUpdatedConfigs( + updatedConfigs, + NATIVE_REPLICATION_SOURCE_FABRIC, + nativeReplicationSourceFabric, + updatedStore::setNativeReplicationSourceFabric); + addToUpdatedConfigs( + updatedConfigs, + LATEST_SUPERSET_SCHEMA_ID, + latestSupersetSchemaId, + updatedStore::setLatestSuperSetValueSchemaId); + addToUpdatedConfigs( + updatedConfigs, + MIN_COMPACTION_LAG_SECONDS, + minCompactionLagSeconds, + updatedStore::setMinCompactionLagSeconds); + addToUpdatedConfigs( + updatedConfigs, + MAX_COMPACTION_LAG_SECONDS, + maxCompactionLagSeconds, + updatedStore::setMaxCompactionLagSeconds); + addToUpdatedConfigs(updatedConfigs, MAX_RECORD_SIZE_BYTES, maxRecordSizeBytes, updatedStore::setMaxRecordSizeBytes); + addToUpdatedConfigs( + updatedConfigs, + MAX_NEARLINE_RECORD_SIZE_BYTES, + maxNearlineRecordSizeBytes, + updatedStore::setMaxNearlineRecordSizeBytes); + addToUpdatedConfigs( + updatedConfigs, + UNUSED_SCHEMA_DELETION_ENABLED, + unusedSchemaDeletionEnabled, + updatedStore::setUnusedSchemaDeletionEnabled); + addToUpdatedConfigs( + updatedConfigs, + BLOB_TRANSFER_ENABLED, + blobTransferEnabled, + updatedStore::setBlobTransferEnabled); + addToUpdatedConfigs( + updatedConfigs, + STORAGE_NODE_READ_QUOTA_ENABLED, + storageNodeReadQuotaEnabled, + updatedStore::setStorageNodeReadQuotaEnabled); + addToUpdatedConfigs(updatedConfigs, REGULAR_VERSION_ETL_ENABLED, regularVersionETLEnabled, regularVersionETL -> { + ETLStoreConfig etlStoreConfig = updatedStore.getEtlStoreConfig(); + if (etlStoreConfig == null) { + etlStoreConfig = new ETLStoreConfigImpl(); + } + etlStoreConfig.setRegularVersionETLEnabled(regularVersionETL); + updatedStore.setEtlStoreConfig(etlStoreConfig); + 
}); + addToUpdatedConfigs(updatedConfigs, FUTURE_VERSION_ETL_ENABLED, futureVersionETLEnabled, futureVersionETL -> { + ETLStoreConfig etlStoreConfig = updatedStore.getEtlStoreConfig(); + if (etlStoreConfig == null) { + etlStoreConfig = new ETLStoreConfigImpl(); + } + etlStoreConfig.setFutureVersionETLEnabled(futureVersionETL); + updatedStore.setEtlStoreConfig(etlStoreConfig); + }); + addToUpdatedConfigs(updatedConfigs, ETLED_PROXY_USER_ACCOUNT, etledUserProxyAccount, etlProxyAccount -> { + ETLStoreConfig etlStoreConfig = updatedStore.getEtlStoreConfig(); + if (etlStoreConfig == null) { + etlStoreConfig = new ETLStoreConfigImpl(); + } + etlStoreConfig.setEtledUserProxyAccount(etlProxyAccount); + updatedStore.setEtlStoreConfig(etlStoreConfig); + }); + addToUpdatedConfigs( + updatedConfigs, + INCREMENTAL_PUSH_ENABLED, + incrementalPushEnabled, + updatedStore::setIncrementalPushEnabled); + addToUpdatedConfigs( + updatedConfigs, + SEPARATE_REAL_TIME_TOPIC_ENABLED, + separateRealTimeTopicEnabled, + updatedStore::setSeparateRealTimeTopicEnabled); + addToUpdatedConfigs( + updatedConfigs, + NEARLINE_PRODUCER_COMPRESSION_ENABLED, + nearlineProducerCompressionEnabled, + updatedStore::setNearlineProducerCompressionEnabled); + addToUpdatedConfigs( + updatedConfigs, + NEARLINE_PRODUCER_COUNT_PER_WRITER, + nearlineProducerCountPerWriter, + updatedStore::setNearlineProducerCountPerWriter); + + // No matter what, set native replication to enabled in multi-region mode if the store currently doesn't enable it, + // and it is not explicitly asked to be updated + if (multiClusterConfigs.isMultiRegion() && !originalStore.isNativeReplicationEnabled()) { + updateInferredConfig( + admin, + updatedStore, + NATIVE_REPLICATION_ENABLED, + updatedConfigs, + () -> updatedStore.setNativeReplicationEnabled(true)); + } + + PartitionerConfig newPartitionerConfig = mergeNewSettingsIntoOldPartitionerConfig( + originalStore, + partitionerClass, + partitionerParams, + amplificationFactor); + + if (newPartitionerConfig != originalStore.getPartitionerConfig()) { + partitionerClass.ifPresent(p -> updatedConfigs.add(PARTITIONER_CLASS)); + partitionerParams.ifPresent(p -> updatedConfigs.add(PARTITIONER_PARAMS)); + amplificationFactor.ifPresent(p -> updatedConfigs.add(AMPLIFICATION_FACTOR)); + updatedStore.setPartitionerConfig(newPartitionerConfig); + } + + if (currentVersion.isPresent()) { + if (checkRegionFilter && admin.isParent() && !onlyParentRegionFilter) { + LOGGER.warn( + "Skipping current version update in parent region for store: {} in cluster: {}", + storeName, + clusterName); + } else { + updatedConfigs.add(VERSION); + updatedStore.setCurrentVersion(currentVersion.get()); + } + } + + HelixVeniceClusterResources resources = admin.getHelixVeniceClusterResources(clusterName); + VeniceControllerClusterConfig clusterConfig = resources.getConfig(); + + HybridStoreConfig originalHybridStoreConfig = originalStore.getHybridStoreConfig(); + HybridStoreConfig newHybridStoreConfigTemp = mergeNewSettingsIntoOldHybridStoreConfig( + originalStore, + hybridRewindSeconds, + hybridOffsetLagThreshold, + hybridTimeLagThreshold, + hybridDataReplicationPolicy, + hybridBufferReplayPolicy); + + HybridStoreConfig newHybridStoreConfig; + // Incremental push was enabled, but hybrid config hasn't changed. 
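A brief sketch of what mergeNewSettingsIntoOldHybridStoreConfig (defined later in this file) produces for a batch-only store, in the spirit of the canMergeNewHybridConfigValuesToOldStore test that this patch removes; TestUtils.createTestStore is borrowed from that test and the literals are illustrative:

    Store store = TestUtils.createTestStore("test_store", "owner", System.currentTimeMillis());
    // No hybrid params on a batch-only store: the merged config is still non-hybrid, so the helper returns null.
    HybridStoreConfig none = UpdateStoreUtils.mergeNewSettingsIntoOldHybridStoreConfig(
        store, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty());
    // Rewind time plus offset lag threshold: a fully specified hybrid config is returned, with unspecified
    // fields defaulting to -1, NON_AGGREGATE and REWIND_FROM_EOP.
    HybridStoreConfig merged = UpdateStoreUtils.mergeNewSettingsIntoOldHybridStoreConfig(
        store, Optional.of(123L), Optional.of(1500L), Optional.empty(), Optional.empty(), Optional.empty());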
Set default hybrid configs + if (!AdminUtils.isHybrid(newHybridStoreConfigTemp) && !originalStore.isIncrementalPushEnabled() + && updatedStore.isIncrementalPushEnabled()) { + newHybridStoreConfig = new HybridStoreConfigImpl( + DEFAULT_REWIND_TIME_IN_SECONDS, + DEFAULT_HYBRID_OFFSET_LAG_THRESHOLD, + DEFAULT_HYBRID_TIME_LAG_THRESHOLD, + DataReplicationPolicy.NON_AGGREGATE, + BufferReplayPolicy.REWIND_FROM_EOP); + } else { + newHybridStoreConfig = newHybridStoreConfigTemp; + } + + if (!AdminUtils.isHybrid(newHybridStoreConfig) && AdminUtils.isHybrid(originalHybridStoreConfig)) { + /** + * If all the hybrid config values are negative, it indicates that the store is being set back to batch-only store. + * We cannot remove the RT topic immediately because with NR and AA, existing current version is + * still consuming the RT topic. + */ + updatedStore.setHybridStoreConfig(null); + + updatedConfigs.add(REWIND_TIME_IN_SECONDS); + updatedConfigs.add(OFFSET_LAG_TO_GO_ONLINE); + updatedConfigs.add(TIME_LAG_TO_GO_ONLINE); + updatedConfigs.add(DATA_REPLICATION_POLICY); + updatedConfigs.add(BUFFER_REPLAY_POLICY); + + updateInferredConfigsForHybridToBatch(admin, clusterConfig, updatedStore, updatedConfigs); + } else if (AdminUtils.isHybrid(newHybridStoreConfig)) { + if (!originalStore.isHybrid()) { + updateInferredConfigsForBatchToHybrid(admin, clusterConfig, updatedStore, updatedConfigs); + } + + // Enable A/A for new incremental-push stores in multi-region mode + if (multiClusterConfigs.isMultiRegion() && !originalStore.isIncrementalPushEnabled() + && updatedStore.isIncrementalPushEnabled()) { + updateInferredConfig( + admin, + updatedStore, + ACTIVE_ACTIVE_REPLICATION_ENABLED, + updatedConfigs, + () -> updatedStore.setActiveActiveReplicationEnabled(true)); + } + + // Store is being made Active-Active + if (updatedStore.isActiveActiveReplicationEnabled() && !originalStore.isActiveActiveReplicationEnabled()) { + // If configs are set to enable incremental push for hybrid Active-Active users store, enable it + if (clusterConfig.enabledIncrementalPushForHybridActiveActiveUserStores()) { + updateInferredConfig( + admin, + updatedStore, + INCREMENTAL_PUSH_ENABLED, + updatedConfigs, + () -> updatedStore.setIncrementalPushEnabled( + AdminUtils.isIncrementalPushSupported( + clusterConfig.isMultiRegion(), + updatedStore.isActiveActiveReplicationEnabled(), + newHybridStoreConfig))); + } + } + + if (AdminUtils.isHybrid(originalHybridStoreConfig)) { + if (originalHybridStoreConfig.getRewindTimeInSeconds() != newHybridStoreConfig.getRewindTimeInSeconds()) { + updatedConfigs.add(REWIND_TIME_IN_SECONDS); + } + + if (originalHybridStoreConfig.getOffsetLagThresholdToGoOnline() != newHybridStoreConfig + .getOffsetLagThresholdToGoOnline()) { + updatedConfigs.add(OFFSET_LAG_TO_GO_ONLINE); + } + + if (originalHybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds() != newHybridStoreConfig + .getProducerTimestampLagThresholdToGoOnlineInSeconds()) { + updatedConfigs.add(TIME_LAG_TO_GO_ONLINE); + } + + if (originalHybridStoreConfig.getDataReplicationPolicy() != newHybridStoreConfig.getDataReplicationPolicy()) { + updatedConfigs.add(DATA_REPLICATION_POLICY); + } + + if (originalHybridStoreConfig.getBufferReplayPolicy() != newHybridStoreConfig.getBufferReplayPolicy()) { + updatedConfigs.add(BUFFER_REPLAY_POLICY); + } + } else { + updatedConfigs.add(REWIND_TIME_IN_SECONDS); + updatedConfigs.add(OFFSET_LAG_TO_GO_ONLINE); + updatedConfigs.add(TIME_LAG_TO_GO_ONLINE); + updatedConfigs.add(DATA_REPLICATION_POLICY); + 
updatedConfigs.add(BUFFER_REPLAY_POLICY); + } + + updatedStore.setHybridStoreConfig(newHybridStoreConfig); + } + + VeniceControllerClusterConfig controllerConfig = admin.getMultiClusterConfigs().getControllerConfig(clusterName); + + if (updatedStore.isIncrementalPushEnabled() && !updatedStore.isSeparateRealTimeTopicEnabled() + && controllerConfig.enabledSeparateRealTimeTopicForStoreWithIncrementalPush()) { + updateInferredConfig(admin, updatedStore, SEPARATE_REAL_TIME_TOPIC_ENABLED, updatedConfigs, () -> { + LOGGER.info("Enabling separate RT topic because incremental push is enabled for store: " + storeName); + updatedStore.setSeparateRealTimeTopicEnabled(true); + }); + } + + if (!updatedStore.isChunkingEnabled() && updatedStore.isWriteComputationEnabled()) { + updateInferredConfig(admin, updatedStore, CHUNKING_ENABLED, updatedConfigs, () -> { + LOGGER.info("Enabling chunking because write compute is enabled for store: " + storeName); + updatedStore.setChunkingEnabled(true); + }); + } + + if (!updatedStore.isRmdChunkingEnabled() && updatedStore.isWriteComputationEnabled()) { + updateInferredConfig(admin, updatedStore, RMD_CHUNKING_ENABLED, updatedConfigs, () -> { + LOGGER.info("Enabling RMD chunking because write compute is enabled for Active/Active store: " + storeName); + updatedStore.setRmdChunkingEnabled(true); + }); + } + + if (!updatedStore.isRmdChunkingEnabled() && updatedStore.isActiveActiveReplicationEnabled()) { + updateInferredConfig(admin, updatedStore, RMD_CHUNKING_ENABLED, updatedConfigs, () -> { + LOGGER.info("Enabling RMD chunking because Active/Active is enabled for store: " + storeName); + updatedStore.setRmdChunkingEnabled(true); + }); + } + + if (params.disableMetaStore().isPresent() && params.disableMetaStore().get()) { + LOGGER.info("Disabling meta system store for store: {} of cluster: {}", storeName, clusterName); + updatedConfigs.add(DISABLE_META_STORE); + updatedStore.setStoreMetaSystemStoreEnabled(false); + updatedStore.setStoreMetadataSystemStoreEnabled(false); + } + + if (params.disableDavinciPushStatusStore().isPresent() && params.disableDavinciPushStatusStore().get()) { + updatedConfigs.add(DISABLE_DAVINCI_PUSH_STATUS_STORE); + LOGGER.info("Disabling davinci push status store for store: {} of cluster: {}", storeName, clusterName); + updatedStore.setDaVinciPushStatusStoreEnabled(false); + } + + if (storeViewConfig.isPresent() && viewName.isPresent()) { + throw new VeniceException("Cannot update a store view and overwrite store view setup together!"); + } + + if (viewName.isPresent()) { + Map updatedViewSettings; + if (!removeView.isPresent()) { + if (!viewClassName.isPresent()) { + throw new VeniceException("View class name is required when configuring a view."); + } + // If View parameter is not provided, use emtpy map instead. It does not inherit from existing config. 
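An aside on the updateInferredConfig helper used for the inferred settings above (its definition appears further down): an inferred value never overrides a config the caller set explicitly, because the helper first consults the updatedConfigs set. A minimal sketch, with admin and store as assumed context:

    Set<String> updatedConfigs = new HashSet<>();
    updatedConfigs.add(CHUNKING_ENABLED); // the caller explicitly chose a chunking value in this request
    UpdateStoreUtils.updateInferredConfig(
        admin, store, CHUNKING_ENABLED, updatedConfigs, () -> store.setChunkingEnabled(true));
    // The runnable does not execute: CHUNKING_ENABLED is already marked as an explicit update. The helper also
    // no-ops for system stores and for controllers where isInferredStoreUpdateAllowed(...) returns false.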
+ ViewConfig viewConfig = new ViewConfigImpl(viewClassName.get(), viewParams.orElse(Collections.emptyMap())); + ViewConfig validatedViewConfig = validateAndDecorateStoreViewConfig(originalStore, viewConfig, viewName.get()); + updatedViewSettings = addNewViewConfigsIntoOldConfigs(originalStore, viewName.get(), validatedViewConfig); + } else { + updatedViewSettings = removeViewConfigFromStoreViewConfigMap(originalStore, viewName.get()); + } + updatedStore.setViewConfigs(updatedViewSettings); + updatedConfigs.add(STORE_VIEW); + } + + if (storeViewConfig.isPresent()) { + // Validate and overwrite store views if they're getting set + Map validatedViewConfigs = + validateAndDecorateStoreViewConfigs(storeViewConfig.get(), originalStore); + updatedStore.setViewConfigs(validatedViewConfigs); + updatedConfigs.add(STORE_VIEW); + } + + if (personaName.isPresent()) { + updatedConfigs.add(PERSONA_NAME); + } + + validateStoreConfigs(admin, clusterName, updatedStore); + validateStoreUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore); + validatePersona(admin, clusterName, updatedStore, personaName); + + return updateStoreWrapper; + } + + private static void addToUpdatedConfigs( + Set updatedConfigs, + String configName, + Optional param, + Consumer paramSetter) { + if (param.isPresent()) { + updatedConfigs.add(configName); + paramSetter.accept(param.get()); + } + } + + static void updateInferredConfig( + Admin admin, + Store store, + String configName, + Set updatedConfigs, + Runnable updater) { + if (!isInferredStoreUpdateAllowed(admin, store.getName())) { + return; + } + + if (!updatedConfigs.contains(configName)) { + updater.run(); + updatedConfigs.add(configName); + } + } + + static void updateInferredConfigsForHybridToBatch( + Admin admin, + VeniceControllerClusterConfig clusterConfig, + Store updatedStore, + Set updatedConfigs) { + updateInferredConfig( + admin, + updatedStore, + INCREMENTAL_PUSH_ENABLED, + updatedConfigs, + () -> updatedStore.setIncrementalPushEnabled(false)); + updateInferredConfig( + admin, + updatedStore, + NATIVE_REPLICATION_SOURCE_FABRIC, + updatedConfigs, + () -> updatedStore + .setNativeReplicationSourceFabric(clusterConfig.getNativeReplicationSourceFabricAsDefaultForBatchOnly())); + updateInferredConfig( + admin, + updatedStore, + ACTIVE_ACTIVE_REPLICATION_ENABLED, + updatedConfigs, + () -> updatedStore.setActiveActiveReplicationEnabled(false)); + } + + static void updateInferredConfigsForBatchToHybrid( + Admin admin, + VeniceControllerClusterConfig clusterConfig, + Store updatedStore, + Set updatedConfigs) { + String clusterName = clusterConfig.getClusterName(); + String storeName = updatedStore.getName(); + + if (!Objects.equals( + updatedStore.getNativeReplicationSourceFabric(), + clusterConfig.getNativeReplicationSourceFabricAsDefaultForHybrid())) { + updateInferredConfig( + admin, + updatedStore, + NATIVE_REPLICATION_SOURCE_FABRIC, + updatedConfigs, + () -> updatedStore + .setNativeReplicationSourceFabric(clusterConfig.getNativeReplicationSourceFabricAsDefaultForHybrid())); + } + + boolean inferredActiveActiveReplicationEnabled = updatedStore.isActiveActiveReplicationEnabled() + || (clusterConfig.isActiveActiveReplicationEnabledAsDefaultForHybrid() && !updatedStore.isSystemStore()); + + if (inferredActiveActiveReplicationEnabled != updatedStore.isActiveActiveReplicationEnabled()) { + /* + * Enable/disable active-active replication for user hybrid stores if the cluster level config + * for new hybrid stores is on. 
+ */ + updateInferredConfig( + admin, + updatedStore, + ACTIVE_ACTIVE_REPLICATION_ENABLED, + updatedConfigs, + () -> updatedStore.setActiveActiveReplicationEnabled(inferredActiveActiveReplicationEnabled)); + } + + if (updatedStore.getPartitionCount() == 0) { + updateInferredConfig(admin, updatedStore, PARTITION_COUNT, updatedConfigs, () -> { + int updatedPartitionCount = PartitionUtils.calculatePartitionCount( + storeName, + updatedStore.getStorageQuotaInByte(), + 0, + clusterConfig.getPartitionSize(), + clusterConfig.getMinNumberOfPartitionsForHybrid(), + clusterConfig.getMaxNumberOfPartitions(), + clusterConfig.isPartitionCountRoundUpEnabled(), + clusterConfig.getPartitionCountRoundUpSize()); + updatedStore.setPartitionCount(updatedPartitionCount); + LOGGER.info( + "Enforcing default hybrid partition count: {} for a new hybrid store: {}", + updatedPartitionCount, + storeName); + }); + } + + /** + * If a store: + * (1) Is being converted to hybrid; + * (2) Is not partial update enabled for now; + * (3) Does not request to change partial update config; + * It means partial update is not enabled, and there is no explict intention to change it. In this case, we will + * check cluster default config based on the replication policy to determine whether to try to enable partial update. + */ + final boolean shouldEnablePartialUpdateBasedOnClusterConfig = (updatedStore.isActiveActiveReplicationEnabled() + ? clusterConfig.isEnablePartialUpdateForHybridActiveActiveUserStores() + : clusterConfig.isEnablePartialUpdateForHybridNonActiveActiveUserStores()); + if (shouldEnablePartialUpdateBasedOnClusterConfig) { + LOGGER.info("Controller will enable partial update based on cluster config for store: " + storeName); + /** + * When trying to turn on partial update based on cluster config, if schema generation failed, we will not fail the + * whole request, but just do NOT turn on partial update, as other config update should still be respected. + */ + try { + PrimaryControllerConfigUpdateUtils.addUpdateSchemaForStore(admin, clusterName, updatedStore.getName(), true); + updateInferredConfig(admin, updatedStore, WRITE_COMPUTATION_ENABLED, updatedConfigs, () -> { + updatedStore.setWriteComputationEnabled(true); + }); + } catch (Exception e) { + LOGGER.warn( + "Caught exception when trying to enable partial update base on cluster config, will not enable partial update for store: " + + storeName, + e); + } + } + } + + /** + * Validate if the specified store is in a valid state or not + * Examples of such checks are: + *
<ul>
+ * <li>Write compute on batch-only store</li>
+ * <li>Incremental push with NON_AGGREGATE DRP in multi-region mode</li>
+ * </ul>
      + */ + static void validateStoreConfigs(Admin admin, String clusterName, Store store) { + String storeName = store.getName(); + String errorMessagePrefix = "Store update error for " + storeName + " in cluster: " + clusterName + ": "; + + VeniceControllerClusterConfig controllerConfig = admin.getMultiClusterConfigs().getControllerConfig(clusterName); + + if (!store.isHybrid()) { + // Inc push + non hybrid not supported + if (store.isIncrementalPushEnabled()) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + "Incremental push is only supported for hybrid stores", + ErrorType.INVALID_CONFIG); + } + + // WC is only supported for hybrid stores + if (store.isWriteComputationEnabled()) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + "Write computation is only supported for hybrid stores", + ErrorType.INVALID_CONFIG); + } + + // AA is only supported for hybrid stores + if (store.isActiveActiveReplicationEnabled()) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + "Active-Active Replication is only supported for hybrid stores", + ErrorType.INVALID_CONFIG); + } + } else { + HybridStoreConfig hybridStoreConfig = store.getHybridStoreConfig(); + // All fields of hybrid store config must have valid values + if (hybridStoreConfig.getRewindTimeInSeconds() < 0) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + "Rewind time cannot be negative for a hybrid store", + ErrorType.INVALID_CONFIG); + } + + if (hybridStoreConfig.getOffsetLagThresholdToGoOnline() < 0 + && hybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds() < 0) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + + "Both offset lag threshold and producer timestamp lag threshold cannot be negative for a hybrid store", + ErrorType.INVALID_CONFIG); + } + + DataReplicationPolicy dataReplicationPolicy = hybridStoreConfig.getDataReplicationPolicy(); + // Incremental push + !AA + NON_AGGREGATE DRP is not supported in multi-region mode + if (controllerConfig.isMultiRegion() && store.isIncrementalPushEnabled() + && !store.isActiveActiveReplicationEnabled() + && dataReplicationPolicy == DataReplicationPolicy.NON_AGGREGATE) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + + "Incremental push is not supported for non active-active hybrid stores with NON_AGGREGATE data replication policy", + ErrorType.INVALID_CONFIG); + } + + // ACTIVE_ACTIVE DRP is only supported when activeActiveReplicationEnabled = true + if (dataReplicationPolicy == DataReplicationPolicy.ACTIVE_ACTIVE && !store.isActiveActiveReplicationEnabled()) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + + "Data replication policy ACTIVE_ACTIVE is only supported for hybrid stores with active-active replication enabled", + ErrorType.INVALID_CONFIG); + } + } + + // Storage quota can not be less than 0 + if (store.getStorageQuotaInByte() < 0 && store.getStorageQuotaInByte() != Store.UNLIMITED_STORAGE_QUOTA) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + "Storage quota can not be less than 0", + ErrorType.INVALID_CONFIG); + } + + // Read quota can not be less than 0 + if (store.getReadQuotaInCU() < 0) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + "Read quota can not be less than 0", + ErrorType.INVALID_CONFIG); + } + + if (!admin.isParent()) { + HelixVeniceClusterResources resources = 
admin.getHelixVeniceClusterResources(clusterName); + ZkRoutersClusterManager routersClusterManager = resources.getRoutersClusterManager(); + int routerCount = routersClusterManager.getLiveRoutersCount(); + int defaultReadQuotaPerRouter = controllerConfig.getDefaultReadQuotaPerRouter(); + + long clusterReadQuota = Math.max(defaultReadQuotaPerRouter, routerCount * defaultReadQuotaPerRouter); + if (store.getReadQuotaInCU() > clusterReadQuota) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + "Read quota can not be more than the cluster quota (" + clusterReadQuota + ")", + ErrorType.INVALID_CONFIG); + } + } + + // Active-active replication is only supported for stores that also have native replication + if (store.isActiveActiveReplicationEnabled() && !store.isNativeReplicationEnabled()) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + "Active/Active Replication cannot be enabled for store " + store.getName() + + " since Native Replication is not enabled on it.", + ErrorType.INVALID_CONFIG); + } + + PartitionerConfig partitionerConfig = store.getPartitionerConfig(); + if (partitionerConfig == null) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + "Partitioner Config cannot be null", + ErrorType.INVALID_CONFIG); + } + + // Active-Active and write-compute are not supported when amplification factor is more than 1 + if (partitionerConfig.getAmplificationFactor() > 1) { + if (store.isActiveActiveReplicationEnabled()) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + "Active-active replication is not supported for stores with amplification factor > 1", + ErrorType.INVALID_CONFIG); + } + + if (store.isWriteComputationEnabled()) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + "Write computation is not supported for stores with amplification factor > 1", + ErrorType.INVALID_CONFIG); + } + } + + // Before setting partitioner config, verify the updated partitionerConfig can be built + try { + Properties partitionerParams = new Properties(); + for (Map.Entry param: partitionerConfig.getPartitionerParams().entrySet()) { + partitionerParams.setProperty(param.getKey(), param.getValue()); + } + + PartitionUtils.getVenicePartitioner( + partitionerConfig.getPartitionerClass(), + new VeniceProperties(partitionerParams), + admin.getKeySchema(clusterName, storeName).getSchema()); + } catch (PartitionerSchemaMismatchException e) { + String errorMessage = errorMessagePrefix + e.getMessage(); + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_SCHEMA); + } catch (Exception e) { + String errorMessage = errorMessagePrefix + "Partitioner Configs are invalid, please verify that partitioner " + + "configs like classpath and parameters are correct!"; + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); + } + + // Validate if the latest superset schema id is an existing value schema + int latestSupersetSchemaId = store.getLatestSuperSetValueSchemaId(); + if (latestSupersetSchemaId != SchemaData.INVALID_VALUE_SCHEMA_ID) { + if (admin.getValueSchema(clusterName, storeName, latestSupersetSchemaId) == null) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + "Unknown value schema id: " + latestSupersetSchemaId + " in store: " + storeName, + ErrorType.INVALID_CONFIG); + } + } + + if (store.getMaxCompactionLagSeconds() < 
store.getMinCompactionLagSeconds()) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + "Store's max compaction lag seconds: " + store.getMaxCompactionLagSeconds() + " shouldn't be smaller than " + + "store's min compaction lag seconds: " + store.getMinCompactionLagSeconds(), + ErrorType.INVALID_CONFIG); + } + + ETLStoreConfig etlStoreConfig = store.getEtlStoreConfig(); + if (etlStoreConfig != null + && (etlStoreConfig.isRegularVersionETLEnabled() || etlStoreConfig.isFutureVersionETLEnabled())) { + if (StringUtils.isEmpty(etlStoreConfig.getEtledUserProxyAccount())) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + "Cannot enable ETL for this store because etled user proxy account is not set", + ErrorType.INVALID_CONFIG); + } + } + } + + /** + * Validate the config changes by looking at the store configs before and after applying the requested updates. + * Examples of such checks are: + *
<ul>
+ * <li>Partition count</li>
+ * <li>Store partitioner</li>
+ * <li>If partial update gets enabled, then the schema must be one that can support it</li>
+ * </ul>
      + */ + private static void validateStoreUpdate( + Admin admin, + VeniceControllerMultiClusterConfig multiClusterConfig, + String clusterName, + Store originalStore, + Store updatedStore) { + validateStorePartitionCountUpdate(admin, multiClusterConfig, clusterName, originalStore, updatedStore); + validateStorePartitionerUpdate(clusterName, originalStore, updatedStore); + + if (updatedStore.isWriteComputationEnabled() && !originalStore.isWriteComputationEnabled()) { + // Dry-run generating update schemas before sending admin messages to enable partial update because + // update schema generation may fail due to some reasons. If that happens, abort the store update process. + PrimaryControllerConfigUpdateUtils.addUpdateSchemaForStore(admin, clusterName, originalStore.getName(), true); + } + } + + private static Map validateAndDecorateStoreViewConfigs(Map stringMap, Store store) { + Map configs = StoreViewUtils.convertStringMapViewToViewConfigMap(stringMap); + Map validatedConfigs = new HashMap<>(); + for (Map.Entry viewConfigEntry: configs.entrySet()) { + ViewConfig validatedViewConfig = + validateAndDecorateStoreViewConfig(store, viewConfigEntry.getValue(), viewConfigEntry.getKey()); + validatedConfigs.put(viewConfigEntry.getKey(), validatedViewConfig); + } + return validatedConfigs; + } + + private static ViewConfig validateAndDecorateStoreViewConfig(Store store, ViewConfig viewConfig, String viewName) { + // TODO: Pass a proper properties object here. Today this isn't used in this context + if (viewConfig.getViewClassName().equals(MaterializedView.class.getCanonicalName())) { + if (viewName.contains(VERSION_SEPARATOR)) { + throw new VeniceException( + String.format("Materialized View name cannot contain version separator: %s", VERSION_SEPARATOR)); + } + Map viewParams = viewConfig.getViewParameters(); + viewParams.put(ViewParameterKeys.MATERIALIZED_VIEW_NAME.name(), viewName); + if (!viewParams.containsKey(ViewParameterKeys.MATERIALIZED_VIEW_PARTITIONER.name())) { + viewParams.put( + ViewParameterKeys.MATERIALIZED_VIEW_PARTITIONER.name(), + store.getPartitionerConfig().getPartitionerClass()); + if (!store.getPartitionerConfig().getPartitionerParams().isEmpty()) { + try { + viewParams.put( + ViewParameterKeys.MATERIALIZED_VIEW_PARTITIONER_PARAMS.name(), + ObjectMapperFactory.getInstance() + .writeValueAsString(store.getPartitionerConfig().getPartitionerParams())); + } catch (JsonProcessingException e) { + throw new VeniceException("Failed to convert store partitioner params to string", e); + } + } + } + if (!viewParams.containsKey(ViewParameterKeys.MATERIALIZED_VIEW_PARTITION_COUNT.name())) { + viewParams.put( + ViewParameterKeys.MATERIALIZED_VIEW_PARTITION_COUNT.name(), + Integer.toString(store.getPartitionCount())); + } + viewConfig.setViewParameters(viewParams); + } + VeniceView view = + ViewUtils.getVeniceView(viewConfig.getViewClassName(), new Properties(), store, viewConfig.getViewParameters()); + view.validateConfigs(); + return viewConfig; + } + + /** + * Used by both the {@link VeniceHelixAdmin} and the {@link VeniceParentHelixAdmin} + * + * @param oldStore Existing Store that is the source for updates. This object will not be modified by this method. 
+ * @param hybridRewindSeconds Optional is present if the returned object should include a new rewind time + * @param hybridOffsetLagThreshold Optional is present if the returned object should include a new offset lag threshold + * @param hybridTimeLagThreshold + * @param hybridDataReplicationPolicy + * @param bufferReplayPolicy + * @return null if oldStore has no hybrid configs and optionals are not present, + * otherwise a fully specified {@link HybridStoreConfig} + */ + static HybridStoreConfig mergeNewSettingsIntoOldHybridStoreConfig( + Store oldStore, + Optional hybridRewindSeconds, + Optional hybridOffsetLagThreshold, + Optional hybridTimeLagThreshold, + Optional hybridDataReplicationPolicy, + Optional bufferReplayPolicy) { + HybridStoreConfig mergedHybridStoreConfig; + if (oldStore.isHybrid()) { // for an existing hybrid store, just replace any specified values + HybridStoreConfig oldHybridConfig = oldStore.getHybridStoreConfig().clone(); + mergedHybridStoreConfig = new HybridStoreConfigImpl( + hybridRewindSeconds.orElseGet(oldHybridConfig::getRewindTimeInSeconds), + hybridOffsetLagThreshold.orElseGet(oldHybridConfig::getOffsetLagThresholdToGoOnline), + hybridTimeLagThreshold.orElseGet(oldHybridConfig::getProducerTimestampLagThresholdToGoOnlineInSeconds), + hybridDataReplicationPolicy.orElseGet(oldHybridConfig::getDataReplicationPolicy), + bufferReplayPolicy.orElseGet(oldHybridConfig::getBufferReplayPolicy)); + } else { + mergedHybridStoreConfig = new HybridStoreConfigImpl( + hybridRewindSeconds.orElse(-1L), + // If not specified, offset/time lag threshold will be -1 and will not be used to determine whether + // a partition is ready to serve + hybridOffsetLagThreshold.orElse(-1L), + hybridTimeLagThreshold.orElse(-1L), + hybridDataReplicationPolicy.orElse(DataReplicationPolicy.NON_AGGREGATE), + bufferReplayPolicy.orElse(BufferReplayPolicy.REWIND_FROM_EOP)); + } + + if (!AdminUtils.isHybrid(mergedHybridStoreConfig)) { + return null; + } + + return mergedHybridStoreConfig; + } + + public static void validateStorePartitionCountUpdate( + Admin admin, + VeniceControllerMultiClusterConfig multiClusterConfigs, + String clusterName, + Store originalStore, + int newPartitionCount) { + Store updatedStore = originalStore.cloneStore(); + updatedStore.setPartitionCount(newPartitionCount); + validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore); + } + + static void validateStorePartitionCountUpdate( + Admin admin, + VeniceControllerMultiClusterConfig multiClusterConfigs, + String clusterName, + Store originalStore, + Store updatedStore) { + String storeName = originalStore.getName(); + String errorMessagePrefix = "Store update error for " + storeName + " in cluster: " + clusterName + ": "; + VeniceControllerClusterConfig clusterConfig = admin.getHelixVeniceClusterResources(clusterName).getConfig(); + + int newPartitionCount = updatedStore.getPartitionCount(); + if (newPartitionCount < 0) { + String errorMessage = errorMessagePrefix + "Partition count: " + newPartitionCount + " should NOT be negative"; + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); + } + + if (updatedStore.isHybrid() && newPartitionCount == 0) { + String errorMessage = errorMessagePrefix + "Partition count cannot be 0 for hybrid store"; + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); + } + + if (originalStore.isHybrid() 
&& updatedStore.isHybrid() && originalStore.getPartitionCount() != newPartitionCount) { + String errorMessage = errorMessagePrefix + "Cannot change partition count for this hybrid store"; + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); + } + + int minPartitionNum = clusterConfig.getMinNumberOfPartitions(); + if (newPartitionCount < minPartitionNum && newPartitionCount != 0) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + "Partition count must be at least " + minPartitionNum + " for store: " + storeName + + ". If a specific partition count is not required, set it to 0.", + ErrorType.INVALID_CONFIG); + } + + int maxPartitionNum = clusterConfig.getMaxNumberOfPartitions(); + if (newPartitionCount > maxPartitionNum) { + String errorMessage = + errorMessagePrefix + "Partition count: " + newPartitionCount + " should be less than max: " + maxPartitionNum; + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); + } + + if (updatedStore.isHybrid()) { + // Allow the update if the new partition count matches RT partition count + TopicManager topicManager; + if (admin.isParent()) { + // RT might not exist in parent colo. Get RT partition count from a child colo. + String childDatacenter = clusterConfig.getChildDatacenters().iterator().next(); + topicManager = admin.getTopicManager(multiClusterConfigs.getChildDataCenterKafkaUrlMap().get(childDatacenter)); + } else { + topicManager = admin.getTopicManager(); + } + PubSubTopic realTimeTopic = admin.getPubSubTopicRepository().getTopic(Version.composeRealTimeTopic(storeName)); + if (!topicManager.containsTopic(realTimeTopic) + || topicManager.getPartitionCount(realTimeTopic) == newPartitionCount) { + LOGGER.info("Allow updating store " + storeName + " partition count to " + newPartitionCount); + return; + } + String errorMessage = errorMessagePrefix + "Cannot change partition count for this hybrid store"; + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); + } + } + + static void validateStorePartitionerUpdate(String clusterName, Store existingStore, Store updatedStore) { + String storeName = existingStore.getName(); + String errorMessagePrefix = "Store update error for " + storeName + " in cluster: " + clusterName + ": "; + + if (!existingStore.isHybrid() || !updatedStore.isHybrid()) { + // Allow partitioner changes for non-hybrid stores + return; + } + + PartitionerConfig existingPartitionerConfig = existingStore.getPartitionerConfig(); + PartitionerConfig updatedPartitionerConfig = updatedStore.getPartitionerConfig(); + + if (!existingPartitionerConfig.getPartitionerClass().equals(updatedPartitionerConfig.getPartitionerClass())) { + String errorMessage = errorMessagePrefix + "Partitioner class cannot be changed for hybrid store"; + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); + } + + if (!existingPartitionerConfig.getPartitionerParams().equals(updatedPartitionerConfig.getPartitionerParams())) { + String errorMessage = errorMessagePrefix + "Partitioner params cannot be changed for hybrid store"; + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); + } + } + + static void validatePersona(Admin admin, String clusterName, Store updatedStore, Optional 
personaName) { + String storeName = updatedStore.getName(); + StoragePersonaRepository repository = + admin.getHelixVeniceClusterResources(clusterName).getStoragePersonaRepository(); + StoragePersona personaToValidate = null; + StoragePersona existingPersona = repository.getPersonaContainingStore(storeName); + + if (personaName.isPresent()) { + personaToValidate = admin.getStoragePersona(clusterName, personaName.get()); + if (personaToValidate == null) { + String errMsg = "UpdateStore command failed for store " + storeName + ". The provided StoragePersona " + + personaName.get() + " does not exist."; + throw new VeniceException(errMsg); + } + } else if (existingPersona != null) { + personaToValidate = existingPersona; + } + + if (personaToValidate != null) { + repository.validateAddUpdatedStore(personaToValidate, Optional.of(updatedStore)); + } + } + + static PartitionerConfig mergeNewSettingsIntoOldPartitionerConfig( + Store oldStore, + Optional partitionerClass, + Optional> partitionerParams, + Optional amplificationFactor) { + + if (!partitionerClass.isPresent() && !partitionerParams.isPresent() && !amplificationFactor.isPresent()) { + return oldStore.getPartitionerConfig(); + } + + PartitionerConfig originalPartitionerConfig; + if (oldStore.getPartitionerConfig() == null) { + originalPartitionerConfig = new PartitionerConfigImpl(); + } else { + originalPartitionerConfig = oldStore.getPartitionerConfig(); + } + return new PartitionerConfigImpl( + partitionerClass.orElse(originalPartitionerConfig.getPartitionerClass()), + partitionerParams.orElse(originalPartitionerConfig.getPartitionerParams()), + amplificationFactor.orElse(originalPartitionerConfig.getAmplificationFactor())); + } + + static Map addNewViewConfigsIntoOldConfigs( + Store oldStore, + String viewClass, + ViewConfig viewConfig) throws VeniceException { + // Add new view config into the existing config map. The new configs will override existing ones which share the + // same key. + Map oldViewConfigMap = oldStore.getViewConfigs(); + if (oldViewConfigMap == null) { + oldViewConfigMap = new HashMap<>(); + } + Map mergedConfigs = new HashMap<>(oldViewConfigMap); + mergedConfigs.put(viewClass, viewConfig); + return mergedConfigs; + } + + static Map removeViewConfigFromStoreViewConfigMap(Store oldStore, String viewClass) + throws VeniceException { + Map oldViewConfigMap = oldStore.getViewConfigs(); + if (oldViewConfigMap == null) { + // TODO: We might want to return a null instead of empty map + oldViewConfigMap = new HashMap<>(); + } + Map mergedConfigs = new HashMap<>(oldViewConfigMap); + mergedConfigs.remove(viewClass); + return mergedConfigs; + } + + /** + * This function is the entry-point of all operations that are necessary after the successful execution of the store + * update. These should only be executed in the primary controller. + * @param admin The main {@link Admin} object for this component + * @param clusterName The name of the cluster where the store is being updated + * @param storeName The name of the store that was updated + */ + public static void handlePostUpdateActions(Admin admin, String clusterName, String storeName) { + PrimaryControllerConfigUpdateUtils.registerInferredSchemas(admin, clusterName, storeName); + } + + /** + * Check if direct store config updates are allowed in this controller. In multi-region mode, parent controller + * decides what store configs get applied to a store. In a single-region mode, the child controller makes this + * decision. 
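For orientation, a sketch of how a primary controller is expected to drive these helpers end to end; the persistence step is deliberately elided and is not an API introduced by this patch:

    UpdateStoreWrapper update = UpdateStoreUtils.getStoreUpdate(admin, clusterName, storeName, params, false);
    if (update != null && !update.updatedConfigs.isEmpty()) {
      // ... persist update.updatedStore through the cluster's store repository (elided) ...
      UpdateStoreUtils.handlePostUpdateActions(admin, clusterName, storeName);
    }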
+ * In a multi-region mode, the child controller must not do any inferencing and must only apply the configs that were + * applied by the parent controller, except for child-region-only stores - i.e. participant store. + */ + static boolean isInferredStoreUpdateAllowed(Admin admin, String storeName) { + // For system stores, do not allow any inferencing + if (VeniceSystemStoreUtils.isSystemStore(storeName)) { + return false; + } + + if (!admin.isPrimary()) { + return false; + } + + // Parent controller can only apply the updates if it is processing updates in VeniceParentHelixAdmin (i.e. not via + // the Admin channel) + return !admin.isParent() || admin instanceof VeniceParentHelixAdmin; + } +} diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreWrapper.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreWrapper.java new file mode 100644 index 0000000000..486b17a295 --- /dev/null +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreWrapper.java @@ -0,0 +1,18 @@ +package com.linkedin.venice.controller.util; + +import com.linkedin.venice.meta.Store; +import java.util.HashSet; +import java.util.Set; + + +public class UpdateStoreWrapper { + public final Set updatedConfigs; + public final Store originalStore; + public final Store updatedStore; + + public UpdateStoreWrapper(Store originalStore) { + this.originalStore = originalStore; + this.updatedConfigs = new HashSet<>(); + this.updatedStore = originalStore.cloneStore(); + } +} diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/AbstractTestVeniceParentHelixAdmin.java b/services/venice-controller/src/test/java/com/linkedin/venice/controller/AbstractTestVeniceParentHelixAdmin.java index 47c5c26577..f033253760 100644 --- a/services/venice-controller/src/test/java/com/linkedin/venice/controller/AbstractTestVeniceParentHelixAdmin.java +++ b/services/venice-controller/src/test/java/com/linkedin/venice/controller/AbstractTestVeniceParentHelixAdmin.java @@ -4,7 +4,6 @@ import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; import com.linkedin.venice.authorization.AuthorizerService; import com.linkedin.venice.authorization.DefaultIdentityParser; @@ -23,7 +22,6 @@ import com.linkedin.venice.helix.StoragePersonaRepository; import com.linkedin.venice.helix.ZkRoutersClusterManager; import com.linkedin.venice.helix.ZkStoreConfigAccessor; -import com.linkedin.venice.meta.HybridStoreConfig; import com.linkedin.venice.meta.OfflinePushStrategy; import com.linkedin.venice.meta.Store; import com.linkedin.venice.meta.StoreInfo; @@ -88,7 +86,6 @@ public void setupInternalMocks() { doReturn(true).when(topicManager).containsTopicAndAllPartitionsAreOnline(pubSubTopicRepository.getTopic(topicName)); internalAdmin = mock(VeniceHelixAdmin.class); - when(internalAdmin.isHybrid((HybridStoreConfig) any())).thenCallRealMethod(); doReturn(topicManager).when(internalAdmin).getTopicManager(); SchemaEntry mockEntry = new SchemaEntry(0, TEST_SCHEMA); doReturn(mockEntry).when(internalAdmin).getKeySchema(anyString(), anyString()); @@ -126,6 +123,8 @@ public void setupInternalMocks() { .put(regionName, ControllerClient.constructClusterControllerClient(clusterName, "localhost", Optional.empty())); doReturn(controllerClients).when(internalAdmin).getControllerClientMap(any()); + 
doReturn(true).when(internalAdmin).isPrimary(); + resources = mockResources(config, clusterName); doReturn(storeRepository).when(resources).getStoreMetadataRepository(); ZkRoutersClusterManager manager = mock(ZkRoutersClusterManager.class); @@ -139,6 +138,8 @@ public void setupInternalMocks() { clusterLockManager = mock(ClusterLockManager.class); doReturn(clusterLockManager).when(resources).getClusterLockManager(); + doReturn(1000).when(config).getDefaultReadQuotaPerRouter(); + adminStats = mock(VeniceAdminStats.class); doReturn(adminStats).when(resources).getVeniceAdminStats(); @@ -203,6 +204,10 @@ VeniceControllerClusterConfig mockConfig(String clusterName) { doReturn(childClusterMap).when(config).getChildDataCenterControllerUrlMap(); doReturn(MAX_PARTITION_NUM).when(config).getMaxNumberOfPartitions(); doReturn(DefaultIdentityParser.class.getName()).when(config).getIdentityParserClassName(); + doReturn(true).when(config).isMultiRegion(); + doReturn(10L).when(config).getPartitionSize(); + doReturn("dc-batch-nr").when(config).getNativeReplicationSourceFabricAsDefaultForBatchOnly(); + doReturn("dc-hybrid-nr").when(config).getNativeReplicationSourceFabricAsDefaultForHybrid(); return config; } diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceHelixAdminWithoutCluster.java b/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceHelixAdminWithoutCluster.java index 9275758a4c..8938e2ac57 100644 --- a/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceHelixAdminWithoutCluster.java +++ b/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceHelixAdminWithoutCluster.java @@ -10,10 +10,6 @@ import com.linkedin.venice.common.VeniceSystemStoreType; import com.linkedin.venice.exceptions.VeniceException; import com.linkedin.venice.helix.ZkStoreConfigAccessor; -import com.linkedin.venice.meta.BufferReplayPolicy; -import com.linkedin.venice.meta.DataReplicationPolicy; -import com.linkedin.venice.meta.HybridStoreConfig; -import com.linkedin.venice.meta.HybridStoreConfigImpl; import com.linkedin.venice.meta.ReadWriteStoreRepository; import com.linkedin.venice.meta.Store; import com.linkedin.venice.meta.StoreConfig; @@ -37,58 +33,6 @@ public class TestVeniceHelixAdminWithoutCluster { private final PubSubTopicRepository pubSubTopicRepository = new PubSubTopicRepository(); - @Test - public void canMergeNewHybridConfigValuesToOldStore() { - String storeName = Utils.getUniqueString("storeName"); - Store store = TestUtils.createTestStore(storeName, "owner", System.currentTimeMillis()); - Assert.assertFalse(store.isHybrid()); - - Optional rewind = Optional.of(123L); - Optional lagOffset = Optional.of(1500L); - Optional timeLag = Optional.of(300L); - Optional dataReplicationPolicy = Optional.of(DataReplicationPolicy.AGGREGATE); - Optional bufferReplayPolicy = Optional.of(BufferReplayPolicy.REWIND_FROM_EOP); - HybridStoreConfig hybridStoreConfig = VeniceHelixAdmin.mergeNewSettingsIntoOldHybridStoreConfig( - store, - Optional.empty(), - Optional.empty(), - Optional.empty(), - Optional.empty(), - Optional.empty()); - Assert.assertNull( - hybridStoreConfig, - "passing empty optionals and a non-hybrid store should generate a null hybrid config"); - - hybridStoreConfig = VeniceHelixAdmin.mergeNewSettingsIntoOldHybridStoreConfig( - store, - rewind, - lagOffset, - timeLag, - dataReplicationPolicy, - bufferReplayPolicy); - Assert.assertNotNull(hybridStoreConfig, "specifying rewind and lagOffset 
should generate a valid hybrid config"); - Assert.assertEquals(hybridStoreConfig.getRewindTimeInSeconds(), 123L); - Assert.assertEquals(hybridStoreConfig.getOffsetLagThresholdToGoOnline(), 1500L); - Assert.assertEquals(hybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds(), 300L); - Assert.assertEquals(hybridStoreConfig.getDataReplicationPolicy(), DataReplicationPolicy.AGGREGATE); - - // It's okay that time lag threshold or data replication policy is not specified - hybridStoreConfig = VeniceHelixAdmin.mergeNewSettingsIntoOldHybridStoreConfig( - store, - rewind, - lagOffset, - Optional.empty(), - Optional.empty(), - Optional.empty()); - Assert.assertNotNull(hybridStoreConfig, "specifying rewind and lagOffset should generate a valid hybrid config"); - Assert.assertEquals(hybridStoreConfig.getRewindTimeInSeconds(), 123L); - Assert.assertEquals(hybridStoreConfig.getOffsetLagThresholdToGoOnline(), 1500L); - Assert.assertEquals( - hybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds(), - HybridStoreConfigImpl.DEFAULT_HYBRID_TIME_LAG_THRESHOLD); - Assert.assertEquals(hybridStoreConfig.getDataReplicationPolicy(), DataReplicationPolicy.NON_AGGREGATE); - } - @Test(expectedExceptions = VeniceException.class, expectedExceptionsMessageRegExp = ".*still exists in cluster.*") public void testCheckResourceCleanupBeforeStoreCreationWhenExistsInOtherCluster() { String clusterName = "cluster1"; diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceParentHelixAdmin.java b/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceParentHelixAdmin.java index b8a8a2fc90..f49bc8118c 100644 --- a/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceParentHelixAdmin.java +++ b/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceParentHelixAdmin.java @@ -2,8 +2,8 @@ import static com.linkedin.venice.controller.VeniceHelixAdmin.VERSION_ID_UNSET; import static com.linkedin.venice.meta.BufferReplayPolicy.REWIND_FROM_SOP; -import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_HYBRID_TIME_LAG_THRESHOLD; import static com.linkedin.venice.meta.Version.VERSION_SEPARATOR; +import static com.linkedin.venice.utils.TestWriteUtils.loadFileAsString; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.anyInt; @@ -21,8 +21,10 @@ import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertThrows; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.expectThrows; -import com.linkedin.venice.common.VeniceSystemStoreUtils; +import com.linkedin.avroutil1.compatibility.AvroCompatibilityHelper; import com.linkedin.venice.compression.CompressionStrategy; import com.linkedin.venice.controller.kafka.AdminTopicUtils; import com.linkedin.venice.controller.kafka.consumer.AdminConsumptionTask; @@ -41,6 +43,7 @@ import com.linkedin.venice.controller.kafka.protocol.serializer.AdminOperationSerializer; import com.linkedin.venice.controller.lingeringjob.LingeringStoreVersionChecker; import com.linkedin.venice.controller.stats.VeniceAdminStats; +import com.linkedin.venice.controller.util.AdminUtils; import com.linkedin.venice.controllerapi.ControllerClient; import com.linkedin.venice.controllerapi.ControllerResponse; import com.linkedin.venice.controllerapi.JobStatusQueryResponse; @@ -82,6 +85,7 @@ import 
com.linkedin.venice.pushmonitor.PartitionStatus; import com.linkedin.venice.pushmonitor.StatusSnapshot; import com.linkedin.venice.schema.GeneratedSchemaID; +import com.linkedin.venice.schema.SchemaEntry; import com.linkedin.venice.schema.avro.DirectionalSchemaCompatibilityType; import com.linkedin.venice.serialization.avro.AvroProtocolDefinition; import com.linkedin.venice.utils.DataProviderUtils; @@ -91,7 +95,6 @@ import com.linkedin.venice.utils.TestUtils; import com.linkedin.venice.utils.Time; import com.linkedin.venice.utils.Utils; -import com.linkedin.venice.utils.concurrent.VeniceConcurrentHashMap; import com.linkedin.venice.utils.locks.ClusterLockManager; import com.linkedin.venice.views.ChangeCaptureView; import com.linkedin.venice.views.MaterializedView; @@ -109,6 +112,7 @@ import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; +import org.apache.avro.Schema; import org.apache.http.HttpStatus; import org.mockito.ArgumentCaptor; import org.testng.Assert; @@ -160,104 +164,6 @@ public void testStartWhenTopicNotExists() { Optional.empty()); } - /** - * Partially stubbed class to verify async setup behavior. - */ - private static class AsyncSetupMockVeniceParentHelixAdmin extends VeniceParentHelixAdmin { - private Map systemStores = new VeniceConcurrentHashMap<>(); - - public AsyncSetupMockVeniceParentHelixAdmin( - VeniceHelixAdmin veniceHelixAdmin, - VeniceControllerClusterConfig config) { - super(veniceHelixAdmin, TestUtils.getMultiClusterConfigFromOneCluster(config)); - } - - public boolean isAsyncSetupRunning(String clusterName) { - return asyncSetupEnabledMap.get(clusterName); - } - - @Override - public void createStore( - String clusterName, - String storeName, - String owner, - String keySchema, - String valueSchema, - boolean isSystemStore) { - if (!(VeniceSystemStoreUtils.isSystemStore(storeName) && isSystemStore)) { - throw new VeniceException("Invalid store name and isSystemStore combination. 
Got store name: " + storeName); - } - if (systemStores.containsKey(storeName)) { - // no op - return; - } - Store newStore = new ZKStore( - storeName, - owner, - System.currentTimeMillis(), - PersistenceType.IN_MEMORY, - RoutingStrategy.HASH, - ReadStrategy.ANY_OF_ONLINE, - OfflinePushStrategy.WAIT_N_MINUS_ONE_REPLCIA_PER_PARTITION, - 1); - systemStores.put(storeName, newStore); - } - - @Override - public Store getStore(String clusterName, String storeName) { - if (!systemStores.containsKey(storeName)) { - return null; - } - return systemStores.get(storeName).cloneStore(); - } - - @Override - public void updateStore(String clusterName, String storeName, UpdateStoreQueryParams params) { - Optional hybridRewindSeconds = params.getHybridRewindSeconds(); - Optional hybridOffsetLagThreshold = params.getHybridOffsetLagThreshold(); - Optional hybridTimeLagThreshold = params.getHybridTimeLagThreshold(); - Optional hybridDataReplicationPolicy = params.getHybridDataReplicationPolicy(); - Optional hybridBufferReplayPolicy = params.getHybridBufferReplayPolicy(); - - if (!systemStores.containsKey(storeName)) { - throw new VeniceNoStoreException("Cannot update store " + storeName + " because it's missing."); - } - if (hybridRewindSeconds.isPresent() && hybridOffsetLagThreshold.isPresent()) { - final long finalHybridTimeLagThreshold = hybridTimeLagThreshold.orElse(DEFAULT_HYBRID_TIME_LAG_THRESHOLD); - final DataReplicationPolicy finalHybridDataReplicationPolicy = - hybridDataReplicationPolicy.orElse(DataReplicationPolicy.NON_AGGREGATE); - final BufferReplayPolicy finalHybridBufferReplayPolicy = - hybridBufferReplayPolicy.orElse(BufferReplayPolicy.REWIND_FROM_EOP); - systemStores.get(storeName) - .setHybridStoreConfig( - new HybridStoreConfigImpl( - hybridRewindSeconds.get(), - hybridOffsetLagThreshold.get(), - finalHybridTimeLagThreshold, - finalHybridDataReplicationPolicy, - finalHybridBufferReplayPolicy)); - } - } - - @Override - public Version incrementVersionIdempotent( - String clusterName, - String storeName, - String pushJobId, - int numberOfPartition, - int replicationFactor) { - if (!systemStores.containsKey(storeName)) { - throw new VeniceNoStoreException("Cannot add version to store " + storeName + " because it's missing."); - } - Version version = new VersionImpl(storeName, 1, "test-id"); - version.setReplicationFactor(replicationFactor); - List versions = new ArrayList<>(); - versions.add(version); - systemStores.get(storeName).setVersions(versions); - return version; - } - } - @Test public void testAddStore() { doReturn(CompletableFuture.completedFuture(new SimplePubSubProduceResultImpl(topicName, partitionId, 1, -1))) @@ -1053,7 +959,7 @@ public void testIdempotentIncrementVersionWhenPreviousTopicsExistAndOfflineJobIs try { partialMockParentAdmin.incrementVersionIdempotent(clusterName, storeName, pushJobId, 1, 1); } catch (VeniceException e) { - Assert.assertTrue( + assertTrue( e.getMessage().contains(pushJobId), "Exception for topic exists when increment version should contain requested pushId"); } @@ -1475,11 +1381,11 @@ public void testStoreVersionCleanUpWithMoreVersions() { } // child region current versions 4,5,6 are persisted for (int i = 4; i <= 6; ++i) { - Assert.assertTrue(capturedStore.containsVersion(i)); + assertTrue(capturedStore.containsVersion(i)); } // last two probably failed pushes are persisted. 
for (int i = 9; i <= 10; ++i) { - Assert.assertTrue(capturedStore.containsVersion(i)); + assertTrue(capturedStore.containsVersion(i)); } } @@ -1571,7 +1477,7 @@ public void testGetExecutionStatus() { for (ExecutionStatus status: ExecutionStatus.values()) { assertEquals(clientMap.get(status).queryJobStatus("topic", Optional.empty()).getStatus(), status.toString()); } - Assert.assertTrue(clientMap.get(null).queryJobStatus("topic", Optional.empty()).isError()); + assertTrue(clientMap.get(null).queryJobStatus("topic", Optional.empty()).isError()); Map completeMap = new HashMap<>(); completeMap.put("cluster", clientMap.get(ExecutionStatus.COMPLETED)); @@ -1692,7 +1598,7 @@ public void testGetExecutionStatus() { assertEquals(extraInfo.get("fabric2"), ExecutionStatus.COMPLETED.toString()); assertEquals(extraInfo.get("failFabric"), ExecutionStatus.UNKNOWN.toString()); assertEquals(extraInfo.get("completelyFailingFabric"), ExecutionStatus.UNKNOWN.toString()); - Assert.assertTrue( + assertTrue( offlineJobStatus.getExtraDetails().get("completelyFailingFabric").contains(completelyFailingExceptionMessage)); Map errorMap = new HashMap<>(); @@ -1770,8 +1676,8 @@ public void testUpdateStore() { assertEquals(adminMessage.operationType, AdminMessageType.UPDATE_STORE.getValue()); UpdateStore updateStore = (UpdateStore) adminMessage.payloadUnion; - assertEquals(updateStore.incrementalPushEnabled, true); - Assert.assertTrue(updateStore.blobTransferEnabled); + assertTrue(updateStore.incrementalPushEnabled); + assertTrue(updateStore.blobTransferEnabled); long readQuota = 100L; boolean readability = true; @@ -1849,7 +1755,7 @@ public void testUpdateStore() { schemaId = schemaCaptor.getValue(); adminMessage = adminOperationSerializer.deserialize(ByteBuffer.wrap(valueBytes), schemaId); updateStore = (UpdateStore) adminMessage.payloadUnion; - Assert.assertTrue( + assertTrue( updateStore.nativeReplicationEnabled, "Native replication was not set to true after updating the store!"); // Test exception thrown for unsuccessful partitioner instance creation inside store update. 
@@ -1860,8 +1766,8 @@ public void testUpdateStore() { new UpdateStoreQueryParams().setPartitionerClass(InvalidKeySchemaPartitioner.class.getName())); Assert.fail("The partitioner creation should not be successful"); } catch (Exception e) { - Assert.assertTrue(e.getClass().isAssignableFrom(VeniceHttpException.class)); - Assert.assertTrue(e instanceof VeniceHttpException); + assertTrue(e.getClass().isAssignableFrom(VeniceHttpException.class)); + assertTrue(e instanceof VeniceHttpException); VeniceHttpException veniceHttpException = (VeniceHttpException) e; assertEquals(veniceHttpException.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); assertEquals(veniceHttpException.getErrorType(), ErrorType.INVALID_SCHEMA); @@ -1903,11 +1809,12 @@ public void testDisableHybridConfigWhenActiveActiveOrIncPushConfigIsEnabled() { 1000, 100, -1, - DataReplicationPolicy.NON_AGGREGATE, + DataReplicationPolicy.ACTIVE_ACTIVE, BufferReplayPolicy.REWIND_FROM_EOP)); store.setActiveActiveReplicationEnabled(true); store.setIncrementalPushEnabled(true); store.setNativeReplicationEnabled(true); + store.setNativeReplicationSourceFabric("dc-0"); store.setChunkingEnabled(true); doReturn(store).when(internalAdmin).getStore(clusterName, storeName); @@ -1943,7 +1850,7 @@ public void testDisableHybridConfigWhenActiveActiveOrIncPushConfigIsEnabled() { AdminOperation adminMessage = verifyAndGetSingleAdminOperation(); UpdateStore updateStore = (UpdateStore) adminMessage.payloadUnion; - Assert.assertFalse(internalAdmin.isHybrid(updateStore.getHybridStoreConfig())); + Assert.assertFalse(AdminUtils.isHybrid(updateStore.getHybridStoreConfig())); Assert.assertFalse(updateStore.incrementalPushEnabled); Assert.assertFalse(updateStore.activeActiveReplicationEnabled); } @@ -1960,7 +1867,7 @@ public void testSetStoreViewConfig() { AdminOperation adminMessage = verifyAndGetSingleAdminOperation(); UpdateStore updateStore = (UpdateStore) adminMessage.payloadUnion; - Assert.assertTrue(updateStore.getViews().containsKey("changeCapture")); + assertTrue(updateStore.getViews().containsKey("changeCapture")); } @Test @@ -2016,11 +1923,11 @@ public void testInsertStoreViewConfig() { AdminOperation adminMessage = verifyAndGetSingleAdminOperation(); UpdateStore updateStore = (UpdateStore) adminMessage.payloadUnion; assertEquals(updateStore.getViews().size(), 2); - Assert.assertTrue(updateStore.getViews().containsKey("changeCapture")); + assertTrue(updateStore.getViews().containsKey("changeCapture")); assertEquals( updateStore.getViews().get("changeCapture").viewClassName.toString(), ChangeCaptureView.class.getCanonicalName()); - Assert.assertTrue(updateStore.getViews().get("changeCapture").viewParameters.isEmpty()); + assertTrue(updateStore.getViews().get("changeCapture").viewParameters.isEmpty()); } @Test @@ -2072,6 +1979,14 @@ public void testInsertMaterializedViewConfig() { public void testRemoveStoreViewConfig() { String storeName = Utils.getUniqueString("testUpdateStore"); Store store = TestUtils.createTestStore(storeName, "test", System.currentTimeMillis()); + store.setPartitionCount(100); + store.setHybridStoreConfig( + new HybridStoreConfigImpl( + 100, + -1, + 100, + DataReplicationPolicy.ACTIVE_ACTIVE, + BufferReplayPolicy.REWIND_FROM_EOP)); store.setActiveActiveReplicationEnabled(true); store.setChunkingEnabled(true); store.setViewConfigs( @@ -2130,11 +2045,6 @@ public void testUpdateStoreWithBadPartitionerConfigs() { () -> parentAdmin .updateStore(clusterName, storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true))); 
verify(veniceWriter, times(0)).put(any(), any(), anyInt()); - - Assert.assertThrows( - () -> parentAdmin - .updateStore(clusterName, storeName, new UpdateStoreQueryParams().setActiveActiveReplicationEnabled(true))); - verify(veniceWriter, times(0)).put(any(), any(), anyInt()); } @Test @@ -2231,7 +2141,7 @@ private Map prepareForCurrentVersionTest(int regionCou public void testGetKafkaTopicsByAge() { String storeName = Utils.getUniqueString("test-store"); List versionTopics = parentAdmin.getKafkaTopicsByAge(storeName); - Assert.assertTrue(versionTopics.isEmpty()); + assertTrue(versionTopics.isEmpty()); Set topicList = new HashSet<>(); topicList.add(pubSubTopicRepository.getTopic(storeName + "_v1")); @@ -2242,8 +2152,8 @@ public void testGetKafkaTopicsByAge() { Assert.assertFalse(versionTopics.isEmpty()); PubSubTopic latestTopic = versionTopics.get(0); assertEquals(latestTopic, pubSubTopicRepository.getTopic(storeName + "_v3")); - Assert.assertTrue(topicList.containsAll(versionTopics)); - Assert.assertTrue(versionTopics.containsAll(topicList)); + assertTrue(topicList.containsAll(versionTopics)); + assertTrue(versionTopics.containsAll(topicList)); } @Test @@ -2301,7 +2211,7 @@ public void testGetTopicForCurrentPushJob() { doReturn(new Admin.OfflinePushStatusInfo(ExecutionStatus.PROGRESS)).when(mockParentAdmin) .getOffLinePushStatus(clusterName, latestTopic); Optional currentPush = mockParentAdmin.getTopicForCurrentPushJob(clusterName, storeName, false, false); - Assert.assertTrue(currentPush.isPresent()); + assertTrue(currentPush.isPresent()); assertEquals(currentPush.get(), latestTopic); verify(mockParentAdmin, times(2)).getOffLinePushStatus(clusterName, latestTopic); @@ -2322,7 +2232,7 @@ public void testGetTopicForCurrentPushJob() { doReturn(new Admin.OfflinePushStatusInfo(ExecutionStatus.PROGRESS, extraInfo)).when(mockParentAdmin) .getOffLinePushStatus(clusterName, latestTopic); currentPush = mockParentAdmin.getTopicForCurrentPushJob(clusterName, storeName, false, false); - Assert.assertTrue(currentPush.isPresent()); + assertTrue(currentPush.isPresent()); assertEquals(currentPush.get(), latestTopic); verify(mockParentAdmin, times(12)).getOffLinePushStatus(clusterName, latestTopic); @@ -2334,7 +2244,7 @@ public void testGetTopicForCurrentPushJob() { .thenReturn(new Admin.OfflinePushStatusInfo(ExecutionStatus.PROGRESS, extraInfo)) .thenReturn(new Admin.OfflinePushStatusInfo(ExecutionStatus.PROGRESS)); currentPush = mockParentAdmin.getTopicForCurrentPushJob(clusterName, storeName, false, false); - Assert.assertTrue(currentPush.isPresent()); + assertTrue(currentPush.isPresent()); assertEquals(currentPush.get(), latestTopic); verify(mockParentAdmin, times(14)).getOffLinePushStatus(clusterName, latestTopic); @@ -2544,7 +2454,7 @@ public void testAdminCanKillLingeringVersion(boolean isIncrementalPush) { newVersion, "Unexpected new version returned by incrementVersionIdempotent"); // Parent should kill the lingering job. 
- Assert.assertTrue(partialMockParentAdmin.isJobKilled(version.kafkaTopicName())); + assertTrue(partialMockParentAdmin.isJobKilled(version.kafkaTopicName())); } } } @@ -2605,7 +2515,7 @@ public void testAdminMessageIsolation() { parentAdmin.incrementVersionIdempotent(clusterName, storeA, "", 3, 3); Assert.fail("Admin operations to a store with existing exception should be blocked"); } catch (VeniceException e) { - Assert.assertTrue(e.getMessage().contains("due to existing exception")); + assertTrue(e.getMessage().contains("due to existing exception")); } // store B should still be able to process admin operations. assertEquals( @@ -2673,14 +2583,12 @@ public void testHybridAndIncrementalUpdateStoreCommands() { UpdateStore updateStore = (UpdateStore) adminMessage.payloadUnion; assertEquals(updateStore.hybridStoreConfig.offsetLagThresholdToGoOnline, 20000); assertEquals(updateStore.hybridStoreConfig.rewindTimeInSeconds, 60); + assertEquals(updateStore.nativeReplicationSourceFabric.toString(), "dc-hybrid-nr"); + assertEquals(updateStore.partitionNum, 1024); + store.setPartitionCount(1024); store.setHybridStoreConfig( - new HybridStoreConfigImpl( - 60, - 20000, - 0, - DataReplicationPolicy.NON_AGGREGATE, - BufferReplayPolicy.REWIND_FROM_EOP)); + new HybridStoreConfigImpl(60, 20000, 0, DataReplicationPolicy.NONE, BufferReplayPolicy.REWIND_FROM_EOP)); // Incremental push can be enabled on a hybrid store, default inc push policy is inc push to RT now parentAdmin.updateStore(clusterName, storeName, new UpdateStoreQueryParams().setIncrementalPushEnabled(true)); @@ -2688,6 +2596,66 @@ public void testHybridAndIncrementalUpdateStoreCommands() { verify(veniceWriter, times(2)).put(keyCaptor.capture(), valueCaptor.capture(), schemaCaptor.capture()); } + @Test + public void testEnableWcValidatesSchema() { + String storeName = Utils.getUniqueString("testUpdateStore"); + Store store = TestUtils.createTestStore(storeName, "test", System.currentTimeMillis()); + + store.setPartitionCount(100); + store.setHybridStoreConfig( + new HybridStoreConfigImpl( + 1000, + 100, + -1, + DataReplicationPolicy.NON_AGGREGATE, + BufferReplayPolicy.REWIND_FROM_EOP)); + store.setNativeReplicationEnabled(true); + store.setNativeReplicationSourceFabric("dc-0"); + doReturn(store).when(internalAdmin).getStore(clusterName, storeName); + + doReturn(CompletableFuture.completedFuture(new SimplePubSubProduceResultImpl(topicName, partitionId, 1, -1))) + .when(veniceWriter) + .put(any(), any(), anyInt()); + + when(zkClient.readData(zkMetadataNodePath, null)).thenReturn(null) + .thenReturn(AdminTopicMetadataAccessor.generateMetadataMap(1, -1, 1)); + + parentAdmin.initStorageCluster(clusterName); + + String stringSchemaStr = "\"string\""; + doReturn(Collections.singletonList(new SchemaEntry(1, stringSchemaStr))).when(internalAdmin) + .getValueSchemas(clusterName, storeName); + VeniceException e = expectThrows( + VeniceException.class, + () -> parentAdmin + .updateStore(clusterName, storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true))); + assertTrue(e.getMessage().contains("top level field probably missing defaults")); + + Schema recordSchema = AvroCompatibilityHelper.parse(loadFileAsString("superset_schema_test/v1.avsc")); + doReturn(Collections.singletonList(new SchemaEntry(1, recordSchema))).when(internalAdmin) + .getValueSchemas(clusterName, storeName); + parentAdmin.updateStore(clusterName, storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true)); + + verify(zkClient, 
times(1)).readData(zkMetadataNodePath, null); + ArgumentCaptor keyCaptor = ArgumentCaptor.forClass(byte[].class); + ArgumentCaptor valueCaptor = ArgumentCaptor.forClass(byte[].class); + ArgumentCaptor schemaCaptor = ArgumentCaptor.forClass(Integer.class); + verify(veniceWriter).put(keyCaptor.capture(), valueCaptor.capture(), schemaCaptor.capture()); + + byte[] keyBytes = keyCaptor.getValue(); + byte[] valueBytes = valueCaptor.getValue(); + int schemaId = schemaCaptor.getValue(); + assertEquals(schemaId, AdminOperationSerializer.LATEST_SCHEMA_ID_FOR_ADMIN_OPERATION); + assertEquals(keyBytes.length, 0); + + AdminOperation adminMessage = adminOperationSerializer.deserialize(ByteBuffer.wrap(valueBytes), schemaId); + assertEquals(adminMessage.operationType, AdminMessageType.UPDATE_STORE.getValue()); + + UpdateStore updateStore = (UpdateStore) adminMessage.payloadUnion; + assertTrue(updateStore.writeComputationEnabled); + assertTrue(updateStore.chunkingEnabled); + } + @Test public void testSetVersionShouldFailOnParentController() { try { @@ -2872,6 +2840,14 @@ public void testGetFinalReturnStatus() { private Store setupForStoreViewConfigUpdateTest(String storeName) { Store store = TestUtils.createTestStore(storeName, "test", System.currentTimeMillis()); + store.setPartitionCount(100); + store.setHybridStoreConfig( + new HybridStoreConfigImpl( + 100, + -1, + 100, + DataReplicationPolicy.ACTIVE_ACTIVE, + BufferReplayPolicy.REWIND_FROM_EOP)); store.setActiveActiveReplicationEnabled(true); store.setChunkingEnabled(true); doReturn(store).when(internalAdmin).getStore(clusterName, storeName); diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/init/SystemStoreInitializationHelperTest.java b/services/venice-controller/src/test/java/com/linkedin/venice/controller/init/SystemStoreInitializationHelperTest.java index 4a7123153b..83dd9c7def 100644 --- a/services/venice-controller/src/test/java/com/linkedin/venice/controller/init/SystemStoreInitializationHelperTest.java +++ b/services/venice-controller/src/test/java/com/linkedin/venice/controller/init/SystemStoreInitializationHelperTest.java @@ -72,11 +72,13 @@ public void testInitialSystemStoreSetup(boolean explicitlyProvidedKeySchema, boo int replicationFactor = 3; doReturn(1).when(firstVersion).getNumber(); doReturn(VersionStatus.ONLINE).when(firstVersion).getStatus(); + doReturn(partitionCount).when(firstVersion).getPartitionCount(); Store storeForTest = mock(Store.class); Store storeForTestAfterUpdateStore = mock(Store.class); doReturn(true).when(storeForTestAfterUpdateStore).isHybrid(); + doReturn(partitionCount).when(storeForTestAfterUpdateStore).getPartitionCount(); Store storeForTestAfterCreatingVersion = mock(Store.class); doReturn(true).when(storeForTestAfterCreatingVersion).isHybrid(); @@ -84,6 +86,7 @@ public void testInitialSystemStoreSetup(boolean explicitlyProvidedKeySchema, boo // The parent controller always has a current version of zero, so this info is not available there. 
doReturn(versionNumber).when(storeForTestAfterCreatingVersion).getCurrentVersion(); } + doReturn(partitionCount).when(storeForTestAfterCreatingVersion).getPartitionCount(); doReturn(firstVersion).when(storeForTestAfterCreatingVersion).getVersion(versionNumber); doReturn(Collections.singletonList(firstVersion)).when(storeForTestAfterCreatingVersion).getVersions(); diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/supersetschema/TestSupersetSchemaGeneratorWithCustomProp.java b/services/venice-controller/src/test/java/com/linkedin/venice/controller/supersetschema/TestSupersetSchemaGeneratorWithCustomProp.java index 7fd4296273..c734d5af60 100644 --- a/services/venice-controller/src/test/java/com/linkedin/venice/controller/supersetschema/TestSupersetSchemaGeneratorWithCustomProp.java +++ b/services/venice-controller/src/test/java/com/linkedin/venice/controller/supersetschema/TestSupersetSchemaGeneratorWithCustomProp.java @@ -31,6 +31,10 @@ public class TestSupersetSchemaGeneratorWithCustomProp { AvroCompatibilityHelper.parse(TestWriteUtils.loadFileAsString("superset_schema_test/v3.avsc")); private Schema schemaV4 = AvroCompatibilityHelper .parse(TestWriteUtils.loadFileAsString("superset_schema_test/v4_without_custom_prop.avsc")); + private Schema schemaV5 = + AvroCompatibilityHelper.parse(TestWriteUtils.loadFileAsString("superset_schema_test/v5.avsc")); + private Schema schemaV6 = + AvroCompatibilityHelper.parse(TestWriteUtils.loadFileAsString("superset_schema_test/v6.avsc")); private SupersetSchemaGenerator generator; @@ -75,6 +79,39 @@ public void testGenerateSupersetSchemaFromSchemas() throws IOException { assertNotNull(supersetSchema3.getField("f1")); assertNotNull(supersetSchema3.getField("f2")); assertNotNull(supersetSchema3.getField("f3")); + + // v5 contains all fields in superset schema and a custom prop + Collection schemaEntryCollection4 = Arrays.asList( + new SchemaEntry(1, schemaV1), + new SchemaEntry(2, schemaV2), + new SchemaEntry(3, schemaV3), + new SchemaEntry(4, schemaV4), + new SchemaEntry(5, schemaV5)); + SchemaEntry supersetSchemaEntry4 = generator.generateSupersetSchemaFromSchemas(schemaEntryCollection4); + assertEquals(supersetSchemaEntry4.getId(), 5); + Schema supersetSchema4 = supersetSchemaEntry4.getSchema(); + assertEquals(supersetSchema4.getProp(CUSTOM_PROP), "custom_prop_value_for_v5"); + assertNotNull(supersetSchema4.getField("f0")); + assertNotNull(supersetSchema4.getField("f1")); + assertNotNull(supersetSchema4.getField("f2")); + assertNotNull(supersetSchema4.getField("f3")); + + // v6 contains a subset of fields, but with a different custom prop + Collection schemaEntryCollection5 = Arrays.asList( + new SchemaEntry(1, schemaV1), + new SchemaEntry(2, schemaV2), + new SchemaEntry(3, schemaV3), + new SchemaEntry(4, schemaV4), + new SchemaEntry(5, schemaV5), + new SchemaEntry(6, schemaV6)); + SchemaEntry supersetSchemaEntry5 = generator.generateSupersetSchemaFromSchemas(schemaEntryCollection5); + assertEquals(supersetSchemaEntry5.getId(), 7); + Schema supersetSchema5 = supersetSchemaEntry5.getSchema(); + assertEquals(supersetSchema5.getProp(CUSTOM_PROP), "custom_prop_value_for_v6"); + assertNotNull(supersetSchema5.getField("f0")); + assertNotNull(supersetSchema5.getField("f1")); + assertNotNull(supersetSchema5.getField("f2")); + assertNotNull(supersetSchema5.getField("f3")); } @Test diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/util/AdminUtilsTest.java 
b/services/venice-controller/src/test/java/com/linkedin/venice/controller/util/AdminUtilsTest.java new file mode 100644 index 0000000000..3857b97d73 --- /dev/null +++ b/services/venice-controller/src/test/java/com/linkedin/venice/controller/util/AdminUtilsTest.java @@ -0,0 +1,167 @@ +package com.linkedin.venice.controller.util; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.verify; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; + +import com.linkedin.venice.ConfigConstants; +import com.linkedin.venice.controller.Admin; +import com.linkedin.venice.controller.VeniceControllerClusterConfig; +import com.linkedin.venice.controller.VeniceControllerMultiClusterConfig; +import com.linkedin.venice.controller.kafka.protocol.admin.HybridStoreConfigRecord; +import com.linkedin.venice.exceptions.VeniceException; +import com.linkedin.venice.meta.BufferReplayPolicy; +import com.linkedin.venice.meta.DataReplicationPolicy; +import com.linkedin.venice.meta.HybridStoreConfig; +import com.linkedin.venice.meta.HybridStoreConfigImpl; +import com.linkedin.venice.meta.Store; +import org.testng.Assert; +import org.testng.annotations.Test; + + +public class AdminUtilsTest { + @Test + public void testIsHybrid() { + Assert.assertFalse(AdminUtils.isHybrid((HybridStoreConfig) null)); + Assert.assertFalse(AdminUtils.isHybrid((HybridStoreConfigRecord) null)); + + HybridStoreConfig hybridStoreConfig; + hybridStoreConfig = new HybridStoreConfigImpl(-1, -1, -1, null, null); + Assert.assertFalse(AdminUtils.isHybrid(hybridStoreConfig)); + + hybridStoreConfig = new HybridStoreConfigImpl(100, -1, -1, null, null); + Assert.assertFalse(AdminUtils.isHybrid(hybridStoreConfig)); + + hybridStoreConfig = new HybridStoreConfigImpl(100, 100, -1, null, null); + assertTrue(AdminUtils.isHybrid(hybridStoreConfig)); + + hybridStoreConfig = new HybridStoreConfigImpl(100, 100, 100, null, null); + assertTrue(AdminUtils.isHybrid(hybridStoreConfig)); + + hybridStoreConfig = new HybridStoreConfigImpl(100, -1, 100, null, null); + assertTrue(AdminUtils.isHybrid(hybridStoreConfig)); + + hybridStoreConfig = new HybridStoreConfigImpl(-1, -1, 100, null, null); + Assert.assertFalse(AdminUtils.isHybrid(hybridStoreConfig)); + + HybridStoreConfigRecord hybridStoreConfigRecord = new HybridStoreConfigRecord(); + hybridStoreConfigRecord.rewindTimeInSeconds = 100; + hybridStoreConfigRecord.offsetLagThresholdToGoOnline = 100; + hybridStoreConfigRecord.producerTimestampLagThresholdToGoOnlineInSeconds = -1; + hybridStoreConfigRecord.dataReplicationPolicy = DataReplicationPolicy.ACTIVE_ACTIVE.getValue(); + hybridStoreConfigRecord.bufferReplayPolicy = BufferReplayPolicy.REWIND_FROM_SOP.getValue(); + assertTrue(AdminUtils.isHybrid(hybridStoreConfigRecord)); + } + + @Test + public void testGetRmdVersionID() { + String storeName = "storeName"; + String clusterName = "clusterName"; + + Admin mockAdmin = mock(Admin.class); + VeniceControllerMultiClusterConfig multiClusterConfig = mock(VeniceControllerMultiClusterConfig.class); + VeniceControllerClusterConfig controllerConfig = mock(VeniceControllerClusterConfig.class); + Store mockStore = mock(Store.class); + + // Store null + cluster config not set + doReturn(null).when(mockAdmin).getStore(clusterName, storeName); + 
doReturn(multiClusterConfig).when(mockAdmin).getMultiClusterConfigs(); + doReturn(null).when(multiClusterConfig).getControllerConfig(clusterName); + VeniceException e1 = + Assert.expectThrows(VeniceException.class, () -> AdminUtils.getRmdVersionID(mockAdmin, storeName, clusterName)); + assertTrue(e1.getMessage().contains("No controller cluster config found for cluster clusterName")); + + reset(mockAdmin); + reset(multiClusterConfig); + reset(controllerConfig); + reset(mockStore); + + // Store null + cluster config set + doReturn(null).when(mockAdmin).getStore(clusterName, storeName); + doReturn(multiClusterConfig).when(mockAdmin).getMultiClusterConfigs(); + doReturn(controllerConfig).when(multiClusterConfig).getControllerConfig(clusterName); + doReturn(10).when(controllerConfig).getReplicationMetadataVersion(); + assertEquals(AdminUtils.getRmdVersionID(mockAdmin, storeName, clusterName), 10); + + reset(mockAdmin); + reset(multiClusterConfig); + reset(controllerConfig); + reset(mockStore); + + // Store-level RMD version ID not found + cluster config not set + doReturn(mockStore).when(mockAdmin).getStore(clusterName, storeName); + doReturn(ConfigConstants.UNSPECIFIED_REPLICATION_METADATA_VERSION).when(mockStore).getRmdVersion(); + doReturn(multiClusterConfig).when(mockAdmin).getMultiClusterConfigs(); + doReturn(null).when(multiClusterConfig).getControllerConfig(clusterName); + VeniceException e2 = + Assert.expectThrows(VeniceException.class, () -> AdminUtils.getRmdVersionID(mockAdmin, storeName, clusterName)); + assertTrue(e2.getMessage().contains("No controller cluster config found for cluster clusterName")); + + reset(mockAdmin); + reset(multiClusterConfig); + reset(controllerConfig); + reset(mockStore); + + // Store-level RMD version ID not found + cluster config set + doReturn(mockStore).when(mockAdmin).getStore(clusterName, storeName); + doReturn(ConfigConstants.UNSPECIFIED_REPLICATION_METADATA_VERSION).when(mockStore).getRmdVersion(); + doReturn(multiClusterConfig).when(mockAdmin).getMultiClusterConfigs(); + doReturn(controllerConfig).when(multiClusterConfig).getControllerConfig(clusterName); + doReturn(10).when(controllerConfig).getReplicationMetadataVersion(); + assertEquals(AdminUtils.getRmdVersionID(mockAdmin, storeName, clusterName), 10); + + reset(mockAdmin); + reset(multiClusterConfig); + reset(controllerConfig); + reset(mockStore); + + // Store-level RMD version ID found + doReturn(mockStore).when(mockAdmin).getStore(clusterName, storeName); + doReturn(5).when(mockStore).getRmdVersion(); + doReturn(multiClusterConfig).when(mockAdmin).getMultiClusterConfigs(); + doReturn(controllerConfig).when(multiClusterConfig).getControllerConfig(clusterName); + doReturn(10).when(controllerConfig).getReplicationMetadataVersion(); + assertEquals(AdminUtils.getRmdVersionID(mockAdmin, storeName, clusterName), 5); + verify(mockAdmin, never()).getMultiClusterConfigs(); + verify(multiClusterConfig, never()).getControllerConfig(any()); + verify(controllerConfig, never()).getReplicationMetadataVersion(); + } + + @Test + public void testIsIncrementalPushSupported() { + HybridStoreConfig nonHybridConfig = + new HybridStoreConfigImpl(-1, -1, -1, DataReplicationPolicy.AGGREGATE, BufferReplayPolicy.REWIND_FROM_EOP); + HybridStoreConfig hybridConfigWithNonAggregateDRP = new HybridStoreConfigImpl( + 100, + 1000, + -1, + DataReplicationPolicy.NON_AGGREGATE, + BufferReplayPolicy.REWIND_FROM_EOP); + HybridStoreConfig hybridConfigWithAggregateDRP = + new HybridStoreConfigImpl(100, 1000, -1, 
DataReplicationPolicy.AGGREGATE, BufferReplayPolicy.REWIND_FROM_EOP); + HybridStoreConfig hybridConfigWithNoneDRP = + new HybridStoreConfigImpl(100, 1000, -1, DataReplicationPolicy.NONE, BufferReplayPolicy.REWIND_FROM_EOP); + + // In single-region mode, any hybrid store should have incremental push enabled. + assertFalse(AdminUtils.isIncrementalPushSupported(false, false, null)); + assertFalse(AdminUtils.isIncrementalPushSupported(false, false, nonHybridConfig)); + assertTrue(AdminUtils.isIncrementalPushSupported(false, false, hybridConfigWithNonAggregateDRP)); + assertTrue(AdminUtils.isIncrementalPushSupported(false, false, hybridConfigWithAggregateDRP)); + assertTrue(AdminUtils.isIncrementalPushSupported(false, false, hybridConfigWithNoneDRP)); + + // In multi-region mode, hybrid stores with NON_AGGREGATE DataReplicationPolicy should not have incremental push + // enabled. + assertFalse(AdminUtils.isIncrementalPushSupported(true, false, null)); + assertFalse(AdminUtils.isIncrementalPushSupported(true, false, nonHybridConfig)); + assertFalse(AdminUtils.isIncrementalPushSupported(true, false, hybridConfigWithNonAggregateDRP)); + assertTrue(AdminUtils.isIncrementalPushSupported(true, false, hybridConfigWithAggregateDRP)); + assertTrue(AdminUtils.isIncrementalPushSupported(true, false, hybridConfigWithNoneDRP)); + assertTrue(AdminUtils.isIncrementalPushSupported(true, true, hybridConfigWithNonAggregateDRP)); + } +} diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/util/PrimaryControllerConfigUpdateUtilsTest.java b/services/venice-controller/src/test/java/com/linkedin/venice/controller/util/PrimaryControllerConfigUpdateUtilsTest.java new file mode 100644 index 0000000000..6985ba1298 --- /dev/null +++ b/services/venice-controller/src/test/java/com/linkedin/venice/controller/util/PrimaryControllerConfigUpdateUtilsTest.java @@ -0,0 +1,154 @@ +package com.linkedin.venice.controller.util; + +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +import com.linkedin.venice.controller.Admin; +import com.linkedin.venice.controller.HelixVeniceClusterResources; +import com.linkedin.venice.controller.supersetschema.SupersetSchemaGenerator; +import com.linkedin.venice.meta.ReadWriteSchemaRepository; +import com.linkedin.venice.meta.Store; +import com.linkedin.venice.schema.SchemaData; +import com.linkedin.venice.schema.SchemaEntry; +import com.linkedin.venice.schema.rmd.RmdSchemaGenerator; +import com.linkedin.venice.schema.writecompute.WriteComputeSchemaConverter; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import org.apache.avro.Schema; +import org.testng.annotations.Test; + + +public class PrimaryControllerConfigUpdateUtilsTest { + private static final String VALUE_FIELD_NAME = "int_field"; + private static final String SECOND_VALUE_FIELD_NAME = "opt_int_field"; + private static final String VALUE_SCHEMA_V1_STR = "{\n" + "\"type\": \"record\",\n" + + "\"name\": \"TestValueSchema\",\n" + "\"namespace\": \"com.linkedin.venice.fastclient.schema\",\n" + + "\"fields\": [\n" + " {\"name\": \"" + VALUE_FIELD_NAME + "\", \"type\": \"int\", \"default\": 10}]\n" + "}"; + private static final String VALUE_SCHEMA_V2_STR = + 
"{\n" + "\"type\": \"record\",\n" + "\"name\": \"TestValueSchema\",\n" + + "\"namespace\": \"com.linkedin.venice.fastclient.schema\",\n" + "\"fields\": [\n" + "{\"name\": \"" + + SECOND_VALUE_FIELD_NAME + "\", \"type\": [\"null\", \"int\"], \"default\": null}]\n" + "}"; + private static final String SUPERSET_VALUE_SCHEMA_STR = "{\n" + "\"type\": \"record\",\n" + + "\"name\": \"TestValueSchema\",\n" + "\"namespace\": \"com.linkedin.venice.fastclient.schema\",\n" + + "\"fields\": [\n" + " {\"name\": \"" + VALUE_FIELD_NAME + "\", \"type\": \"int\", \"default\": 10},\n" + + "{\"name\": \"" + SECOND_VALUE_FIELD_NAME + "\", \"type\": [\"null\", \"int\"], \"default\": null}]\n" + "}"; + + @Test + public void testRegisterInferredSchemas() { + String clusterName = "clusterName"; + String storeName = "storeName"; + Collection storeValueSchemas = + Arrays.asList(new SchemaEntry(1, VALUE_SCHEMA_V1_STR), new SchemaEntry(2, VALUE_SCHEMA_V2_STR)); + SchemaEntry supersetSchemaEntry = new SchemaEntry(3, SUPERSET_VALUE_SCHEMA_STR); + + Admin mockAdmin = mock(Admin.class); + Store store = mock(Store.class); + + reset(mockAdmin); + reset(store); + setupMocks(mockAdmin, store, clusterName, storeName, storeValueSchemas, supersetSchemaEntry); + + doReturn(SchemaData.INVALID_VALUE_SCHEMA_ID).when(store).getLatestSuperSetValueSchemaId(); + doReturn(true).when(store).isReadComputationEnabled(); + PrimaryControllerConfigUpdateUtils.registerInferredSchemas(mockAdmin, clusterName, storeName); + validateSuperSetSchemaGenerated(mockAdmin, clusterName, storeName); + + reset(mockAdmin); + reset(store); + setupMocks(mockAdmin, store, clusterName, storeName, storeValueSchemas, supersetSchemaEntry); + + doReturn(SchemaData.INVALID_VALUE_SCHEMA_ID).when(store).getLatestSuperSetValueSchemaId(); + doReturn(true).when(store).isWriteComputationEnabled(); + PrimaryControllerConfigUpdateUtils.registerInferredSchemas(mockAdmin, clusterName, storeName); + validateSuperSetSchemaGenerated(mockAdmin, clusterName, storeName); + validateUpdateSchemaGenerated(mockAdmin, clusterName, storeName); + + reset(mockAdmin); + reset(store); + setupMocks(mockAdmin, store, clusterName, storeName, storeValueSchemas, supersetSchemaEntry); + + doReturn(1).when(store).getLatestSuperSetValueSchemaId(); + PrimaryControllerConfigUpdateUtils.registerInferredSchemas(mockAdmin, clusterName, storeName); + validateSuperSetSchemaGenerated(mockAdmin, clusterName, storeName); + + reset(mockAdmin); + reset(store); + setupMocks(mockAdmin, store, clusterName, storeName, storeValueSchemas, supersetSchemaEntry); + + doReturn(true).when(store).isActiveActiveReplicationEnabled(); + doReturn(1).when(store).getRmdVersion(); + PrimaryControllerConfigUpdateUtils.registerInferredSchemas(mockAdmin, clusterName, storeName); + validateRmdSchemaGenerated(mockAdmin, clusterName, storeName); + } + + private void setupMocks( + Admin mockAdmin, + Store store, + String clusterName, + String storeName, + Collection storeValueSchemas, + SchemaEntry supersetSchemaEntry) { + doReturn(storeName).when(store).getName(); + + SupersetSchemaGenerator supersetSchemaGenerator = mock(SupersetSchemaGenerator.class); + + doReturn(true).when(mockAdmin).isPrimary(); + doReturn(false).when(mockAdmin).isParent(); + doReturn(store).when(mockAdmin).getStore(clusterName, storeName); + doReturn(supersetSchemaGenerator).when(mockAdmin).getSupersetSchemaGenerator(clusterName); + doReturn(storeValueSchemas).when(mockAdmin).getValueSchemas(clusterName, storeName); + + 
doReturn(supersetSchemaEntry).when(supersetSchemaGenerator).generateSupersetSchemaFromSchemas(storeValueSchemas); + + HelixVeniceClusterResources clusterResources = mock(HelixVeniceClusterResources.class); + doReturn(clusterResources).when(mockAdmin).getHelixVeniceClusterResources(clusterName); + + ReadWriteSchemaRepository schemaRepository = mock(ReadWriteSchemaRepository.class); + doReturn(schemaRepository).when(clusterResources).getSchemaRepository(); + + doReturn(Collections.emptyList()).when(schemaRepository).getReplicationMetadataSchemas(storeName); + } + + private void validateSuperSetSchemaGenerated(Admin mockAdmin, String clusterName, String storeName) { + verify(mockAdmin).addSupersetSchema( + clusterName, + storeName, + null, + SchemaData.INVALID_VALUE_SCHEMA_ID, + SUPERSET_VALUE_SCHEMA_STR, + 3); + } + + private void validateUpdateSchemaGenerated(Admin mockAdmin, String clusterName, String storeName) { + SchemaEntry updateSchemaEntry1 = new SchemaEntry(1, VALUE_SCHEMA_V1_STR); + Schema updateSchema1 = + WriteComputeSchemaConverter.getInstance().convertFromValueRecordSchema(updateSchemaEntry1.getSchema()); + + SchemaEntry updateSchemaEntry2 = new SchemaEntry(1, VALUE_SCHEMA_V2_STR); + Schema updateSchema2 = + WriteComputeSchemaConverter.getInstance().convertFromValueRecordSchema(updateSchemaEntry2.getSchema()); + + // Ideally, we should have seen the superset schema also, but due to the static-ness of mocks, we don't see it now + verify(mockAdmin).addDerivedSchema(clusterName, storeName, 1, updateSchema1.toString()); + verify(mockAdmin).addDerivedSchema(clusterName, storeName, 2, updateSchema2.toString()); + verify(mockAdmin, times(2)).addDerivedSchema(eq(clusterName), eq(storeName), anyInt(), anyString()); + } + + private void validateRmdSchemaGenerated(Admin mockAdmin, String clusterName, String storeName) { + Schema rmdSchema1 = RmdSchemaGenerator.generateMetadataSchema(VALUE_SCHEMA_V1_STR, 1); + Schema rmdSchema2 = RmdSchemaGenerator.generateMetadataSchema(VALUE_SCHEMA_V2_STR, 1); + + // Ideally, we should have seen the superset schema also, but due to the static-ness of mocks, we don't see it now + verify(mockAdmin).addReplicationMetadataSchema(clusterName, storeName, 1, 1, rmdSchema1.toString()); + verify(mockAdmin).addReplicationMetadataSchema(clusterName, storeName, 2, 1, rmdSchema2.toString()); + verify(mockAdmin, times(2)) + .addReplicationMetadataSchema(eq(clusterName), eq(storeName), anyInt(), eq(1), anyString()); + } +} diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/util/UpdateStoreUtilsTest.java b/services/venice-controller/src/test/java/com/linkedin/venice/controller/util/UpdateStoreUtilsTest.java new file mode 100644 index 0000000000..33555036be --- /dev/null +++ b/services/venice-controller/src/test/java/com/linkedin/venice/controller/util/UpdateStoreUtilsTest.java @@ -0,0 +1,1040 @@ +package com.linkedin.venice.controller.util; + +import static com.linkedin.venice.controllerapi.ControllerApiConstants.ACTIVE_ACTIVE_REPLICATION_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.INCREMENTAL_PUSH_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.NATIVE_REPLICATION_SOURCE_FABRIC; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.PARTITION_COUNT; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.WRITE_COMPUTATION_ENABLED; +import static com.linkedin.venice.utils.ByteUtils.BYTES_PER_GB; +import static 
org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.verify; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNotSame; +import static org.testng.Assert.assertSame; +import static org.testng.Assert.assertThrows; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.expectThrows; + +import com.linkedin.venice.common.VeniceSystemStoreType; +import com.linkedin.venice.common.VeniceSystemStoreUtils; +import com.linkedin.venice.controller.Admin; +import com.linkedin.venice.controller.HelixVeniceClusterResources; +import com.linkedin.venice.controller.VeniceControllerClusterConfig; +import com.linkedin.venice.controller.VeniceControllerMultiClusterConfig; +import com.linkedin.venice.controller.VeniceHelixAdmin; +import com.linkedin.venice.controller.VeniceParentHelixAdmin; +import com.linkedin.venice.exceptions.ErrorType; +import com.linkedin.venice.exceptions.PartitionerSchemaMismatchException; +import com.linkedin.venice.exceptions.VeniceException; +import com.linkedin.venice.exceptions.VeniceHttpException; +import com.linkedin.venice.helix.StoragePersonaRepository; +import com.linkedin.venice.helix.ZkRoutersClusterManager; +import com.linkedin.venice.meta.BufferReplayPolicy; +import com.linkedin.venice.meta.DataReplicationPolicy; +import com.linkedin.venice.meta.ETLStoreConfigImpl; +import com.linkedin.venice.meta.HybridStoreConfig; +import com.linkedin.venice.meta.HybridStoreConfigImpl; +import com.linkedin.venice.meta.PartitionerConfig; +import com.linkedin.venice.meta.PartitionerConfigImpl; +import com.linkedin.venice.meta.Store; +import com.linkedin.venice.meta.Version; +import com.linkedin.venice.meta.ViewConfig; +import com.linkedin.venice.partitioner.DefaultVenicePartitioner; +import com.linkedin.venice.partitioner.VenicePartitioner; +import com.linkedin.venice.persona.StoragePersona; +import com.linkedin.venice.pubsub.PubSubTopicRepository; +import com.linkedin.venice.pubsub.api.PubSubTopic; +import com.linkedin.venice.pubsub.manager.TopicManager; +import com.linkedin.venice.schema.SchemaData; +import com.linkedin.venice.schema.SchemaEntry; +import com.linkedin.venice.utils.TestUtils; +import com.linkedin.venice.utils.Utils; +import com.linkedin.venice.utils.VeniceProperties; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import javax.annotation.Nonnull; +import org.apache.avro.Schema; +import org.apache.http.HttpStatus; +import org.testng.Assert; +import org.testng.annotations.Test; + + +public class UpdateStoreUtilsTest { + private static final String VALUE_FIELD_NAME = "int_field"; + private static final String SECOND_VALUE_FIELD_NAME = "opt_int_field"; + private static final String VALUE_SCHEMA_V1_STR = "{\n" + "\"type\": \"record\",\n" + + "\"name\": \"TestValueSchema\",\n" + "\"namespace\": \"com.linkedin.venice.fastclient.schema\",\n" + 
+ "\"fields\": [\n" + " {\"name\": \"" + VALUE_FIELD_NAME + "\", \"type\": \"int\", \"default\": 10}]\n" + "}"; + private static final String VALUE_SCHEMA_V2_STR = + "{\n" + "\"type\": \"record\",\n" + "\"name\": \"TestValueSchema\",\n" + + "\"namespace\": \"com.linkedin.venice.fastclient.schema\",\n" + "\"fields\": [\n" + "{\"name\": \"" + + SECOND_VALUE_FIELD_NAME + "\", \"type\": [\"null\", \"int\"], \"default\": null}]\n" + "}"; + + @Test + public void testMergeNewHybridConfigValuesToOldStore() { + String storeName = Utils.getUniqueString("storeName"); + Store store = TestUtils.createTestStore(storeName, "owner", System.currentTimeMillis()); + assertFalse(store.isHybrid()); + + Optional rewind = Optional.of(123L); + Optional lagOffset = Optional.of(1500L); + Optional timeLag = Optional.of(300L); + Optional dataReplicationPolicy = Optional.of(DataReplicationPolicy.AGGREGATE); + Optional bufferReplayPolicy = Optional.of(BufferReplayPolicy.REWIND_FROM_EOP); + HybridStoreConfig hybridStoreConfig = UpdateStoreUtils.mergeNewSettingsIntoOldHybridStoreConfig( + store, + Optional.empty(), + Optional.empty(), + Optional.empty(), + Optional.empty(), + Optional.empty()); + Assert.assertNull( + hybridStoreConfig, + "passing empty optionals and a non-hybrid store should generate a null hybrid config"); + + hybridStoreConfig = UpdateStoreUtils.mergeNewSettingsIntoOldHybridStoreConfig( + store, + rewind, + lagOffset, + timeLag, + dataReplicationPolicy, + bufferReplayPolicy); + Assert.assertNotNull(hybridStoreConfig, "specifying rewind and lagOffset should generate a valid hybrid config"); + Assert.assertEquals(hybridStoreConfig.getRewindTimeInSeconds(), 123L); + Assert.assertEquals(hybridStoreConfig.getOffsetLagThresholdToGoOnline(), 1500L); + Assert.assertEquals(hybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds(), 300L); + Assert.assertEquals(hybridStoreConfig.getDataReplicationPolicy(), DataReplicationPolicy.AGGREGATE); + + // It's okay that time lag threshold or data replication policy is not specified + hybridStoreConfig = UpdateStoreUtils.mergeNewSettingsIntoOldHybridStoreConfig( + store, + rewind, + lagOffset, + Optional.empty(), + Optional.empty(), + Optional.empty()); + Assert.assertNotNull(hybridStoreConfig, "specifying rewind and lagOffset should generate a valid hybrid config"); + Assert.assertEquals(hybridStoreConfig.getRewindTimeInSeconds(), 123L); + Assert.assertEquals(hybridStoreConfig.getOffsetLagThresholdToGoOnline(), 1500L); + Assert.assertEquals( + hybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds(), + HybridStoreConfigImpl.DEFAULT_HYBRID_TIME_LAG_THRESHOLD); + Assert.assertEquals(hybridStoreConfig.getDataReplicationPolicy(), DataReplicationPolicy.NON_AGGREGATE); + } + + @Test + public void testIsInferredStoreUpdateAllowed() { + String clusterName = "clusterName"; + String storeName = "storeName"; + Admin mockAdmin = mock(Admin.class); + + assertFalse( + UpdateStoreUtils.isInferredStoreUpdateAllowed(mockAdmin, VeniceSystemStoreUtils.getMetaStoreName(storeName))); + assertFalse( + UpdateStoreUtils + .isInferredStoreUpdateAllowed(mockAdmin, VeniceSystemStoreUtils.getDaVinciPushStatusStoreName(storeName))); + assertFalse( + UpdateStoreUtils.isInferredStoreUpdateAllowed( + mockAdmin, + VeniceSystemStoreUtils.getParticipantStoreNameForCluster(clusterName))); + assertFalse( + UpdateStoreUtils.isInferredStoreUpdateAllowed( + mockAdmin, + VeniceSystemStoreType.BATCH_JOB_HEARTBEAT_STORE.getZkSharedStoreName())); + + 
doReturn(false).when(mockAdmin).isPrimary(); + assertFalse(UpdateStoreUtils.isInferredStoreUpdateAllowed(mockAdmin, storeName)); + + Admin mockChildAdmin = mock(VeniceHelixAdmin.class); + doReturn(true).when(mockChildAdmin).isPrimary(); + doReturn(false).when(mockChildAdmin).isParent(); + assertTrue(UpdateStoreUtils.isInferredStoreUpdateAllowed(mockChildAdmin, storeName)); + + Admin mockParentAdmin = mock(VeniceParentHelixAdmin.class); + doReturn(true).when(mockParentAdmin).isPrimary(); + doReturn(true).when(mockParentAdmin).isParent(); + assertTrue(UpdateStoreUtils.isInferredStoreUpdateAllowed(mockParentAdmin, storeName)); + } + + @Test + public void testUpdateInferredConfig() { + String storeName = "storeName"; + Admin admin = mock(Admin.class); + Store store = mock(Store.class); + Set updatedConfigSet = new HashSet<>(); + final AtomicBoolean updaterInvoked = new AtomicBoolean(false); + + doReturn(storeName).when(store).getName(); + doReturn(true).when(admin).isPrimary(); + doReturn(false).when(admin).isParent(); + + // Config previously updated. Will not update again. + updatedConfigSet.add("key1"); + updaterInvoked.set(false); + UpdateStoreUtils.updateInferredConfig(admin, store, "key1", updatedConfigSet, () -> updaterInvoked.set(true)); + assertFalse(updaterInvoked.get()); + assertTrue(updatedConfigSet.contains("key1")); + assertEquals(updatedConfigSet.size(), 1); + + // Config not updated previously. Will update it. + updatedConfigSet.clear(); + updaterInvoked.set(false); + UpdateStoreUtils.updateInferredConfig(admin, store, "key1", updatedConfigSet, () -> updaterInvoked.set(true)); + assertTrue(updaterInvoked.get()); + assertTrue(updatedConfigSet.contains("key1")); + assertEquals(updatedConfigSet.size(), 1); + + // Config not updated previously. Will not update it for system stores. 
+ updatedConfigSet.clear(); + updaterInvoked.set(false); + doReturn(VeniceSystemStoreUtils.getParticipantStoreNameForCluster(storeName)).when(store).getName(); + UpdateStoreUtils.updateInferredConfig(admin, store, "key1", updatedConfigSet, () -> updaterInvoked.set(true)); + assertFalse(updaterInvoked.get()); + assertTrue(updatedConfigSet.isEmpty()); + } + + @Test + public void testUpdateInferredConfigsForHybridToBatch() { + String storeName = "storeName"; + Admin admin = mock(Admin.class); + Store store = mock(Store.class); + VeniceControllerClusterConfig clusterConfig = mock(VeniceControllerClusterConfig.class); + + doReturn(true).when(admin).isPrimary(); + doReturn(false).when(admin).isParent(); + + Set updatedConfigSet = new HashSet<>(); + doReturn(storeName).when(store).getName(); + doReturn("dc-batch").when(clusterConfig).getNativeReplicationSourceFabricAsDefaultForBatchOnly(); + doReturn("dc-hybrid").when(clusterConfig).getNativeReplicationSourceFabricAsDefaultForHybrid(); + + UpdateStoreUtils.updateInferredConfigsForHybridToBatch(admin, clusterConfig, store, updatedConfigSet); + + verify(store).setIncrementalPushEnabled(false); + verify(store).setNativeReplicationSourceFabric("dc-batch"); + verify(store).setActiveActiveReplicationEnabled(false); + + assertEquals(updatedConfigSet.size(), 3); + assertTrue(updatedConfigSet.contains(INCREMENTAL_PUSH_ENABLED)); + assertTrue(updatedConfigSet.contains(NATIVE_REPLICATION_SOURCE_FABRIC)); + assertTrue(updatedConfigSet.contains(ACTIVE_ACTIVE_REPLICATION_ENABLED)); + } + + @Test + public void testUpdateInferredConfigsForBatchToHybrid() { + String clusterName = "clusterName"; + String storeName = "storeName"; + Admin admin = mock(Admin.class); + Store store = mock(Store.class); + VeniceControllerClusterConfig clusterConfig = mock(VeniceControllerClusterConfig.class); + + doReturn(true).when(admin).isPrimary(); + doReturn(false).when(admin).isParent(); + + doReturn(storeName).when(store).getName(); + doReturn(false).when(store).isSystemStore(); + doReturn(0).when(store).getPartitionCount(); + doReturn(10 * BYTES_PER_GB).when(store).getStorageQuotaInByte(); + + Set updatedConfigSet = new HashSet<>(); + doReturn(clusterName).when(clusterConfig).getClusterName(); + doReturn("dc-batch").when(clusterConfig).getNativeReplicationSourceFabricAsDefaultForBatchOnly(); + doReturn("dc-hybrid").when(clusterConfig).getNativeReplicationSourceFabricAsDefaultForHybrid(); + doReturn(true).when(clusterConfig).isActiveActiveReplicationEnabledAsDefaultForHybrid(); + doReturn(1 * BYTES_PER_GB).when(clusterConfig).getPartitionSize(); + doReturn(3).when(clusterConfig).getMinNumberOfPartitionsForHybrid(); + doReturn(100).when(clusterConfig).getMaxNumberOfPartitions(); + doReturn(1).when(clusterConfig).getPartitionCountRoundUpSize(); + doReturn(true).when(clusterConfig).isEnablePartialUpdateForHybridNonActiveActiveUserStores(); + + doReturn(Arrays.asList(new SchemaEntry(1, VALUE_SCHEMA_V1_STR), new SchemaEntry(2, VALUE_SCHEMA_V2_STR))) + .when(admin) + .getValueSchemas(clusterName, storeName); + + UpdateStoreUtils.updateInferredConfigsForBatchToHybrid(admin, clusterConfig, store, updatedConfigSet); + + verify(store).setNativeReplicationSourceFabric("dc-hybrid"); + verify(store).setActiveActiveReplicationEnabled(true); + verify(store).setPartitionCount(10); + verify(store).setWriteComputationEnabled(true); + + assertEquals(updatedConfigSet.size(), 4); + assertTrue(updatedConfigSet.contains(NATIVE_REPLICATION_SOURCE_FABRIC)); + 
assertTrue(updatedConfigSet.contains(ACTIVE_ACTIVE_REPLICATION_ENABLED)); + assertTrue(updatedConfigSet.contains(PARTITION_COUNT)); + assertTrue(updatedConfigSet.contains(WRITE_COMPUTATION_ENABLED)); + + // Update schemas should only be generated in dry-run mode and not registered yet. + verify(admin, never()).addDerivedSchema(eq(clusterName), eq(storeName), anyInt(), anyString()); + } + + @Test + public void testValidateStoreConfigs() { + String clusterName = "clusterName"; + String storeName = "storeName"; + Admin admin = mock(Admin.class); + Store store = mock(Store.class); + VeniceControllerMultiClusterConfig multiClusterConfigs = mock(VeniceControllerMultiClusterConfig.class); + VeniceControllerClusterConfig controllerConfig = mock(VeniceControllerClusterConfig.class); + + doReturn(multiClusterConfigs).when(admin).getMultiClusterConfigs(); + doReturn(controllerConfig).when(multiClusterConfigs).getControllerConfig(clusterName); + doReturn(1000).when(controllerConfig).getDefaultReadQuotaPerRouter(); + + HelixVeniceClusterResources resources = mock(HelixVeniceClusterResources.class); + ZkRoutersClusterManager routersClusterManager = mock(ZkRoutersClusterManager.class); + + doReturn(resources).when(admin).getHelixVeniceClusterResources(clusterName); + doReturn(routersClusterManager).when(resources).getRoutersClusterManager(); + doReturn(1).when(routersClusterManager).getLiveRoutersCount(); + + // Batch-only + incremental push is not allowed + doReturn(storeName).when(store).getName(); + doReturn(false).when(store).isHybrid(); + doReturn(true).when(store).isIncrementalPushEnabled(); + VeniceHttpException e1 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e1.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e1.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e1.getMessage().contains("Incremental push is only supported for hybrid stores")); + + reset(store); + + // Batch-only + write compute is not allowed + doReturn(storeName).when(store).getName(); + doReturn(false).when(store).isHybrid(); + doReturn(true).when(store).isWriteComputationEnabled(); + VeniceHttpException e2 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e2.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e2.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e2.getMessage().contains("Write computation is only supported for hybrid stores")); + + reset(store); + + // Hybrid store cannot have negative rewind time config + doReturn(storeName).when(store).getName(); + doReturn(true).when(store).isHybrid(); + doReturn( + new HybridStoreConfigImpl(-1, 100, -1, DataReplicationPolicy.NON_AGGREGATE, BufferReplayPolicy.REWIND_FROM_EOP)) + .when(store) + .getHybridStoreConfig(); + VeniceHttpException e3 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e3.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e3.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e3.getMessage().contains("Rewind time cannot be negative for a hybrid store")); + + reset(store); + + // Hybrid store cannot have negative offset lag and negative producer time lag thresholds + doReturn(storeName).when(store).getName(); + doReturn(true).when(store).isHybrid(); + doReturn( + new HybridStoreConfigImpl(100, -1, -1, DataReplicationPolicy.NON_AGGREGATE, 
BufferReplayPolicy.REWIND_FROM_EOP)) + .when(store) + .getHybridStoreConfig(); + VeniceHttpException e4 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e4.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e4.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue( + e4.getMessage() + .contains( + "Both offset lag threshold and producer timestamp lag threshold cannot be negative for a hybrid store")); + + reset(store); + + // Incremental push + NON_AGGREGATE DRP is not supported in multi-region mode + doReturn(true).when(controllerConfig).isMultiRegion(); + doReturn(storeName).when(store).getName(); + doReturn(true).when(store).isHybrid(); + doReturn(true).when(store).isIncrementalPushEnabled(); + doReturn( + new HybridStoreConfigImpl( + 100, + 100, + -1, + DataReplicationPolicy.NON_AGGREGATE, + BufferReplayPolicy.REWIND_FROM_EOP)).when(store).getHybridStoreConfig(); + VeniceHttpException e5 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e5.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e5.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue( + e5.getMessage() + .contains( + "Incremental push is not supported for non active-active hybrid stores with NON_AGGREGATE data replication policy")); + + reset(controllerConfig); + reset(store); + + // Incremental push + NON_AGGREGATE DRP is supported in single-region mode + doReturn(false).when(controllerConfig).isMultiRegion(); + doReturn(storeName).when(store).getName(); + doReturn(true).when(store).isHybrid(); + doReturn(true).when(store).isIncrementalPushEnabled(); + doReturn( + new HybridStoreConfigImpl( + 100, + 100, + -1, + DataReplicationPolicy.NON_AGGREGATE, + BufferReplayPolicy.REWIND_FROM_EOP)).when(store).getHybridStoreConfig(); + doReturn(new PartitionerConfigImpl()).when(store).getPartitionerConfig(); + doReturn(new SchemaEntry(1, VALUE_SCHEMA_V1_STR)).when(admin).getKeySchema(clusterName, storeName); + doReturn(SchemaData.INVALID_VALUE_SCHEMA_ID).when(store).getLatestSuperSetValueSchemaId(); + UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store); + + reset(controllerConfig); + reset(store); + + // ACTIVE_ACTIVE DRP is only supported when activeActiveReplicationEnabled = true + doReturn(storeName).when(store).getName(); + doReturn(true).when(store).isHybrid(); + doReturn(false).when(store).isActiveActiveReplicationEnabled(); + doReturn( + new HybridStoreConfigImpl( + 100, + -1, + 100, + DataReplicationPolicy.ACTIVE_ACTIVE, + BufferReplayPolicy.REWIND_FROM_EOP)).when(store).getHybridStoreConfig(); + VeniceHttpException e6 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e6.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e6.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue( + e6.getMessage() + .contains( + "Data replication policy ACTIVE_ACTIVE is only supported for hybrid stores with active-active replication enabled")); + + reset(controllerConfig); + reset(store); + + // Storage quota can not be less than 0 + doReturn(storeName).when(store).getName(); + doReturn(-5L).when(store).getStorageQuotaInByte(); + VeniceHttpException e7 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e7.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + 
assertEquals(e7.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e7.getMessage().contains("Storage quota can not be less than 0")); + + reset(controllerConfig); + reset(store); + + // Storage quota can be -1. Special value for unlimited quota + doReturn(storeName).when(store).getName(); + doReturn(-1L).when(store).getStorageQuotaInByte(); + doReturn(new PartitionerConfigImpl()).when(store).getPartitionerConfig(); + doReturn(new SchemaEntry(1, VALUE_SCHEMA_V1_STR)).when(admin).getKeySchema(clusterName, storeName); + doReturn(SchemaData.INVALID_VALUE_SCHEMA_ID).when(store).getLatestSuperSetValueSchemaId(); + UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store); + + reset(controllerConfig); + reset(store); + + // Read quota can not be less than 0 + doReturn(storeName).when(store).getName(); + doReturn(-5L).when(store).getReadQuotaInCU(); + VeniceHttpException e8 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e8.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e8.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e8.getMessage().contains("Read quota can not be less than 0")); + + reset(controllerConfig); + reset(store); + + // Read quota can not be larger than cluster quota + doReturn(storeName).when(store).getName(); + doReturn(2000L).when(store).getReadQuotaInCU(); + VeniceHttpException e9 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e9.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e9.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e9.getMessage().contains("Read quota can not be more than the cluster quota")); + + reset(controllerConfig); + reset(store); + + // Active-active replication is not supported for batch-only stores + doReturn(storeName).when(store).getName(); + doReturn(false).when(store).isNativeReplicationEnabled(); + doReturn(true).when(store).isActiveActiveReplicationEnabled(); + VeniceHttpException e10 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e10.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e10.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e10.getMessage().contains("Active-Active Replication is only supported for hybrid stores")); + + reset(controllerConfig); + reset(store); + + // Active-active replication is only supported for stores that also have native replication + doReturn(storeName).when(store).getName(); + doReturn(true).when(store).isHybrid(); + doReturn( + new HybridStoreConfigImpl( + 100, + -1, + 100, + DataReplicationPolicy.ACTIVE_ACTIVE, + BufferReplayPolicy.REWIND_FROM_EOP)).when(store).getHybridStoreConfig(); + doReturn(false).when(store).isNativeReplicationEnabled(); + doReturn(true).when(store).isActiveActiveReplicationEnabled(); + VeniceHttpException e11 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e11.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e11.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue( + e11.getMessage() + .contains( + "Active/Active Replication cannot be enabled for store " + store.getName() + + " since Native Replication is not enabled on it.")); + + reset(controllerConfig); + reset(store); + + // Partitioner Config cannot be null + doReturn(storeName).when(store).getName(); + 
doReturn(null).when(store).getPartitionerConfig(); + VeniceHttpException e12 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e12.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e12.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e12.getMessage().contains("Partitioner Config cannot be null")); + + reset(controllerConfig); + reset(store); + + // Active-Active is not supported when amplification factor is more than 1 + doReturn(storeName).when(store).getName(); + doReturn(true).when(store).isHybrid(); + doReturn( + new HybridStoreConfigImpl( + 100, + -1, + 100, + DataReplicationPolicy.ACTIVE_ACTIVE, + BufferReplayPolicy.REWIND_FROM_EOP)).when(store).getHybridStoreConfig(); + doReturn(new PartitionerConfigImpl(DefaultVenicePartitioner.class.getName(), new HashMap<>(), 10)).when(store) + .getPartitionerConfig(); + doReturn(true).when(store).isNativeReplicationEnabled(); + doReturn(true).when(store).isActiveActiveReplicationEnabled(); + VeniceHttpException e13 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e13.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e13.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue( + e13.getMessage() + .contains("Active-active replication is not supported for stores with amplification factor > 1")); + + reset(controllerConfig); + reset(store); + + // Write-compute is not supported when amplification factor is more than 1 + doReturn(storeName).when(store).getName(); + doReturn(true).when(store).isHybrid(); + doReturn( + new HybridStoreConfigImpl( + 100, + 100, + -1, + DataReplicationPolicy.NON_AGGREGATE, + BufferReplayPolicy.REWIND_FROM_EOP)).when(store).getHybridStoreConfig(); + doReturn(new PartitionerConfigImpl(DefaultVenicePartitioner.class.getName(), new HashMap<>(), 10)).when(store) + .getPartitionerConfig(); + doReturn(true).when(store).isWriteComputationEnabled(); + VeniceHttpException e14 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e14.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e14.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue( + e14.getMessage().contains("Write computation is not supported for stores with amplification factor > 1")); + + reset(controllerConfig); + reset(store); + + // Verify the updated partitionerConfig can be built - partitioner doesn't exist + doReturn(storeName).when(store).getName(); + doReturn(new PartitionerConfigImpl("com.linkedin.venice.InvalidPartitioner", new HashMap<>(), 10)).when(store) + .getPartitionerConfig(); + VeniceHttpException e15 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e15.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e15.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue( + e15.getMessage() + .contains( + "Partitioner Configs are invalid, please verify that partitioner configs like classpath and parameters are correct!")); + + reset(controllerConfig); + reset(store); + + // Verify the updated partitionerConfig can be built - schema is not supported by partitioner + doReturn(storeName).when(store).getName(); + doReturn( + new PartitionerConfigImpl( + PickyVenicePartitioner.class.getName(), + Collections.singletonMap(PickyVenicePartitioner.SCHEMA_VALID, "false"), + 
10)).when(store).getPartitionerConfig(); + VeniceHttpException e16 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e16.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e16.getErrorType(), ErrorType.INVALID_SCHEMA); + assertTrue(e16.getMessage().contains("Schema is not valid")); + + reset(controllerConfig); + reset(store); + + // Validate if the latest superset schema id is an existing value schema + doReturn(storeName).when(store).getName(); + doReturn(new PartitionerConfigImpl()).when(store).getPartitionerConfig(); + doReturn(new SchemaEntry(1, VALUE_SCHEMA_V1_STR)).when(admin).getKeySchema(clusterName, storeName); + doReturn(null).when(admin).getValueSchema(clusterName, storeName, 10); + doReturn(10).when(store).getLatestSuperSetValueSchemaId(); + VeniceHttpException e17 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e17.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e17.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e17.getMessage().contains("Unknown value schema id: 10 in store: storeName")); + + reset(controllerConfig); + reset(store); + + // Max compaction lag >= Min compaction lag + doReturn(storeName).when(store).getName(); + doReturn(new PartitionerConfigImpl()).when(store).getPartitionerConfig(); + doReturn(SchemaData.INVALID_VALUE_SCHEMA_ID).when(store).getLatestSuperSetValueSchemaId(); + doReturn(10L).when(store).getMaxCompactionLagSeconds(); + doReturn(100L).when(store).getMinCompactionLagSeconds(); + VeniceHttpException e18 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e18.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e18.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue( + e18.getMessage() + .contains( + "Store's max compaction lag seconds: 10 shouldn't be smaller than store's min compaction lag seconds: 100")); + + reset(controllerConfig); + reset(store); + + // ETL Proxy user must be set if ETL is enabled for current or future version + doReturn(storeName).when(store).getName(); + doReturn(new PartitionerConfigImpl()).when(store).getPartitionerConfig(); + doReturn(SchemaData.INVALID_VALUE_SCHEMA_ID).when(store).getLatestSuperSetValueSchemaId(); + doReturn(new ETLStoreConfigImpl("", true, false)).when(store).getEtlStoreConfig(); + VeniceHttpException e19 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e19.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e19.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue( + e19.getMessage().contains("Cannot enable ETL for this store because etled user proxy account is not set")); + + reset(controllerConfig); + reset(store); + } + + @Test + public void testValidateStorePartitionCountUpdate() { + String clusterName = "clusterName"; + String storeName = "storeName"; + + Admin admin = mock(Admin.class); + VeniceControllerMultiClusterConfig multiClusterConfigs = mock(VeniceControllerMultiClusterConfig.class); + VeniceControllerClusterConfig clusterConfig = mock(VeniceControllerClusterConfig.class); + HelixVeniceClusterResources clusterResources = mock(HelixVeniceClusterResources.class); + TopicManager topicManager = mock(TopicManager.class); + PubSubTopicRepository topicRepository = mock(PubSubTopicRepository.class); + 
PubSubTopic rtTopic = mock(PubSubTopic.class); + + doReturn(false).when(admin).isParent(); + doReturn(topicManager).when(admin).getTopicManager(); + doReturn(topicRepository).when(admin).getPubSubTopicRepository(); + doReturn(rtTopic).when(topicRepository).getTopic(Version.composeRealTimeTopic(storeName)); + + Store originalStore = mock(Store.class); + Store updatedStore = mock(Store.class); + + doReturn(3).when(clusterConfig).getMinNumberOfPartitions(); + doReturn(100).when(clusterConfig).getMaxNumberOfPartitions(); + + doReturn(clusterResources).when(admin).getHelixVeniceClusterResources(clusterName); + doReturn(clusterConfig).when(clusterResources).getConfig(); + + doReturn(storeName).when(originalStore).getName(); + doReturn(storeName).when(updatedStore).getName(); + + // Negative partition count is not allowed + doReturn(-1).when(updatedStore).getPartitionCount(); + VeniceHttpException e1 = expectThrows( + VeniceHttpException.class, + () -> UpdateStoreUtils + .validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore)); + assertEquals(e1.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e1.getErrorType(), ErrorType.INVALID_CONFIG); + + // Hybrid store with partition count = 0 + doReturn(true).when(updatedStore).isHybrid(); + doReturn(0).when(updatedStore).getPartitionCount(); + VeniceHttpException e2 = expectThrows( + VeniceHttpException.class, + () -> UpdateStoreUtils + .validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore)); + assertEquals(e2.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e2.getErrorType(), ErrorType.INVALID_CONFIG); + + // Partition count cannot be less than min partition count + doReturn(true).when(updatedStore).isHybrid(); + doReturn(1).when(updatedStore).getPartitionCount(); + VeniceHttpException e3 = expectThrows( + VeniceHttpException.class, + () -> UpdateStoreUtils + .validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore)); + assertEquals(e3.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e3.getErrorType(), ErrorType.INVALID_CONFIG); + + // Partition count cannot be greater than max partition count + doReturn(false).when(updatedStore).isHybrid(); + doReturn(1000).when(updatedStore).getPartitionCount(); + VeniceHttpException e4 = expectThrows( + VeniceHttpException.class, + () -> UpdateStoreUtils + .validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore)); + assertEquals(e4.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e4.getErrorType(), ErrorType.INVALID_CONFIG); + + // Partition count change for hybrid stores is not allowed + doReturn(true).when(originalStore).isHybrid(); + doReturn(true).when(updatedStore).isHybrid(); + doReturn(10).when(originalStore).getPartitionCount(); + doReturn(20).when(updatedStore).getPartitionCount(); + VeniceHttpException e5 = expectThrows( + VeniceHttpException.class, + () -> UpdateStoreUtils + .validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore)); + assertEquals(e5.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e5.getErrorType(), ErrorType.INVALID_CONFIG); + + // Partition count update is allowed if RT topic doesn't exist + doReturn(true).when(originalStore).isHybrid(); + doReturn(true).when(updatedStore).isHybrid(); + doReturn(10).when(originalStore).getPartitionCount(); + 
doReturn(10).when(updatedStore).getPartitionCount(); + doReturn(false).when(topicManager).containsTopic(rtTopic); + UpdateStoreUtils + .validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore); + + // Partition count update is allowed if RT topic exists and partition count matches the store's partition count + doReturn(true).when(originalStore).isHybrid(); + doReturn(true).when(updatedStore).isHybrid(); + doReturn(10).when(originalStore).getPartitionCount(); + doReturn(10).when(updatedStore).getPartitionCount(); + doReturn(true).when(topicManager).containsTopic(rtTopic); + doReturn(10).when(topicManager).getPartitionCount(rtTopic); + UpdateStoreUtils + .validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore); + + // Partition count update is not allowed if RT topic exists and partition count is different from the store's + // partition count + doReturn(true).when(originalStore).isHybrid(); + doReturn(true).when(updatedStore).isHybrid(); + doReturn(10).when(originalStore).getPartitionCount(); + doReturn(10).when(updatedStore).getPartitionCount(); + doReturn(true).when(topicManager).containsTopic(rtTopic); + doReturn(20).when(topicManager).getPartitionCount(rtTopic); + VeniceHttpException e6 = expectThrows( + VeniceHttpException.class, + () -> UpdateStoreUtils + .validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore)); + assertEquals(e6.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e6.getErrorType(), ErrorType.INVALID_CONFIG); + + // Partition count change for batch stores is allowed + doReturn(true).when(originalStore).isHybrid(); + doReturn(false).when(updatedStore).isHybrid(); + doReturn(10).when(originalStore).getPartitionCount(); + doReturn(20).when(updatedStore).getPartitionCount(); + // No exception is thrown + UpdateStoreUtils + .validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore); + } + + @Test + public void testValidateStorePartitionerUpdate() { + String clusterName = "clusterName"; + String storeName = "storeName"; + + Store originalStore = mock(Store.class); + Store updatedStore = mock(Store.class); + + doReturn(storeName).when(originalStore).getName(); + doReturn(storeName).when(updatedStore).getName(); + + // Partitioner param update is allowed for batch-only stores + doReturn(false).when(originalStore).isHybrid(); + doReturn(false).when(updatedStore).isHybrid(); + UpdateStoreUtils.validateStorePartitionerUpdate(clusterName, originalStore, updatedStore); + + // Partitioner param update is allowed during hybrid to batch conversion + doReturn(true).when(originalStore).isHybrid(); + doReturn(false).when(updatedStore).isHybrid(); + UpdateStoreUtils.validateStorePartitionerUpdate(clusterName, originalStore, updatedStore); + + // Partitioner param update is allowed during batch to hybrid conversion + doReturn(false).when(originalStore).isHybrid(); + doReturn(true).when(updatedStore).isHybrid(); + UpdateStoreUtils.validateStorePartitionerUpdate(clusterName, originalStore, updatedStore); + + PartitionerConfig originalPartitionerConfig; + PartitionerConfig updatedPartitionerConfig; + + // Partitioner class update is not allowed for hybrid stores + doReturn(true).when(originalStore).isHybrid(); + doReturn(true).when(updatedStore).isHybrid(); + originalPartitionerConfig = new PartitionerConfigImpl("ClassA", Collections.singletonMap("key1", "value1"), 1); + 
updatedPartitionerConfig = new PartitionerConfigImpl("ClassB", Collections.singletonMap("key1", "value1"), 1); + doReturn(originalPartitionerConfig).when(originalStore).getPartitionerConfig(); + doReturn(updatedPartitionerConfig).when(updatedStore).getPartitionerConfig(); + VeniceHttpException e1 = expectThrows( + VeniceHttpException.class, + () -> UpdateStoreUtils.validateStorePartitionerUpdate(clusterName, originalStore, updatedStore)); + assertEquals(e1.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e1.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e1.getMessage().contains("Partitioner class cannot be changed for hybrid store")); + + // Partitioner param update is not allowed for hybrid stores + doReturn(true).when(originalStore).isHybrid(); + doReturn(true).when(updatedStore).isHybrid(); + originalPartitionerConfig = new PartitionerConfigImpl("ClassA", Collections.singletonMap("key1", "value1"), 1); + updatedPartitionerConfig = new PartitionerConfigImpl("ClassA", Collections.singletonMap("key2", "value2"), 1); + doReturn(originalPartitionerConfig).when(originalStore).getPartitionerConfig(); + doReturn(updatedPartitionerConfig).when(updatedStore).getPartitionerConfig(); + VeniceHttpException e2 = expectThrows( + VeniceHttpException.class, + () -> UpdateStoreUtils.validateStorePartitionerUpdate(clusterName, originalStore, updatedStore)); + assertEquals(e2.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e2.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e2.getMessage().contains("Partitioner params cannot be changed for hybrid store")); + + // Amplification factor changes are allowed for hybrid stores + doReturn(true).when(originalStore).isHybrid(); + doReturn(true).when(updatedStore).isHybrid(); + originalPartitionerConfig = new PartitionerConfigImpl("ClassA", Collections.singletonMap("key1", "value1"), 1); + updatedPartitionerConfig = new PartitionerConfigImpl("ClassA", Collections.singletonMap("key1", "value1"), 10); + doReturn(originalPartitionerConfig).when(originalStore).getPartitionerConfig(); + doReturn(updatedPartitionerConfig).when(updatedStore).getPartitionerConfig(); + UpdateStoreUtils.validateStorePartitionerUpdate(clusterName, originalStore, updatedStore); + } + + @Test + public void testValidatePersona() { + String clusterName = "clusterName"; + String storeName = "storeName"; + + Store store = mock(Store.class); + Admin admin = mock(Admin.class); + HelixVeniceClusterResources clusterResources = mock(HelixVeniceClusterResources.class); + StoragePersonaRepository personaRepository = mock(StoragePersonaRepository.class); + StoragePersona persona = mock(StoragePersona.class); + + doReturn(storeName).when(store).getName(); + + doReturn(clusterResources).when(admin).getHelixVeniceClusterResources(clusterName); + doReturn(personaRepository).when(clusterResources).getStoragePersonaRepository(); + + // Persona not updated. Store doesn't have an existing persona. Update is allowed. + doReturn(null).when(personaRepository).getPersonaContainingStore(storeName); + UpdateStoreUtils.validatePersona(admin, clusterName, store, Optional.empty()); + + // Persona not updated. Store has an existing persona. Update is allowed if persona repo allows. 
+ doReturn(persona).when(personaRepository).getPersonaContainingStore(storeName); + // Validation doesn't throw exception -> update is allowed + doNothing().when(personaRepository).validateAddUpdatedStore(any(), any()); + UpdateStoreUtils.validatePersona(admin, clusterName, store, Optional.empty()); + // Validation throws exception -> update is not allowed + doThrow(new VeniceException()).when(personaRepository).validateAddUpdatedStore(any(), any()); + assertThrows( + VeniceException.class, + () -> UpdateStoreUtils.validatePersona(admin, clusterName, store, Optional.empty())); + + String updatedPersona = "persona2"; + // Persona updated. New persona doesn't exist. Update is not allowed. + doReturn(null).when(personaRepository).getPersonaContainingStore(storeName); + doReturn(null).when(admin).getStoragePersona(clusterName, updatedPersona); + assertThrows( + VeniceException.class, + () -> UpdateStoreUtils.validatePersona(admin, clusterName, store, Optional.of(updatedPersona))); + + // Persona updated. New persona exists. Update is allowed if persona repo allows. + doReturn(null).when(personaRepository).getPersonaContainingStore(storeName); + doReturn(persona).when(admin).getStoragePersona(clusterName, updatedPersona); + // Validation doesn't throw exception -> update is allowed + doNothing().when(personaRepository).validateAddUpdatedStore(any(), any()); + UpdateStoreUtils.validatePersona(admin, clusterName, store, Optional.of(updatedPersona)); + // Validation throws exception -> update is not allowed + doThrow(new VeniceException()).when(personaRepository).validateAddUpdatedStore(any(), any()); + assertThrows( + VeniceException.class, + () -> UpdateStoreUtils.validatePersona(admin, clusterName, store, Optional.of(updatedPersona))); + } + + @Test + public void testMergeNewSettingsIntoOldPartitionerConfig() { + String storeName = "storeName"; + Store store = mock(Store.class); + + PartitionerConfig oldPartitionerConfig = new PartitionerConfigImpl(); + + doReturn(storeName).when(store).getName(); + doReturn(oldPartitionerConfig).when(store).getPartitionerConfig(); + + // No updates to the store partitioner configs should return the same partitioner configs + assertSame( + UpdateStoreUtils + .mergeNewSettingsIntoOldPartitionerConfig(store, Optional.empty(), Optional.empty(), Optional.empty()), + oldPartitionerConfig); + + String updatedPartitionerClass = "Class B"; + Map updatedPartitionerParams = Collections.singletonMap("key1", "value1"); + int updatedAmpFactor = 10; + + PartitionerConfig newPartitionerConfig = UpdateStoreUtils.mergeNewSettingsIntoOldPartitionerConfig( + store, + Optional.of(updatedPartitionerClass), + Optional.of(updatedPartitionerParams), + Optional.of(updatedAmpFactor)); + assertNotSame(newPartitionerConfig, oldPartitionerConfig); // Should be a new object + assertEquals(newPartitionerConfig.getPartitionerClass(), updatedPartitionerClass); + assertEquals(newPartitionerConfig.getPartitionerParams(), updatedPartitionerParams); + assertEquals(newPartitionerConfig.getAmplificationFactor(), updatedAmpFactor); + + // Even if the store doesn't have a partitioner config, the new partitioner config should be returned + doReturn(null).when(store).getPartitionerConfig(); + PartitionerConfig newPartitionerConfig2 = UpdateStoreUtils.mergeNewSettingsIntoOldPartitionerConfig( + store, + Optional.of(updatedPartitionerClass), + Optional.of(updatedPartitionerParams), + Optional.of(updatedAmpFactor)); + assertNotSame(newPartitionerConfig2, oldPartitionerConfig); // Should be a new object + 
assertEquals(newPartitionerConfig2.getPartitionerClass(), updatedPartitionerClass); + assertEquals(newPartitionerConfig2.getPartitionerParams(), updatedPartitionerParams); + assertEquals(newPartitionerConfig2.getAmplificationFactor(), updatedAmpFactor); + } + + @Test + public void testAddNewViewConfigsIntoOldConfigs() { + String storeName = "storeName"; + Store store = mock(Store.class); + String classA = "ClassA"; + String classB = "ClassB"; + String classC = "ClassC"; + + ViewConfig viewConfigA = mock(ViewConfig.class); + ViewConfig viewConfigB = mock(ViewConfig.class); + ViewConfig viewConfigC = mock(ViewConfig.class); + + Map viewConfigMap = new HashMap() { + { + put(classA, viewConfigA); + put(classB, viewConfigB); + } + }; + + doReturn(storeName).when(store).getName(); + doReturn(viewConfigMap).when(store).getViewConfigs(); + + Map mergedViewConfig1 = + UpdateStoreUtils.addNewViewConfigsIntoOldConfigs(store, classC, viewConfigC); + assertEquals(mergedViewConfig1.size(), 3); + assertEquals(mergedViewConfig1.get(classA), viewConfigA); + assertEquals(mergedViewConfig1.get(classB), viewConfigB); + assertEquals(mergedViewConfig1.get(classC), viewConfigC); + + Map mergedViewConfig2 = + UpdateStoreUtils.addNewViewConfigsIntoOldConfigs(store, classB, viewConfigC); + assertEquals(mergedViewConfig2.size(), 2); + assertEquals(mergedViewConfig2.get(classA), viewConfigA); + assertEquals(mergedViewConfig2.get(classB), viewConfigC); + + doReturn(null).when(store).getViewConfigs(); + Map mergedViewConfig3 = + UpdateStoreUtils.addNewViewConfigsIntoOldConfigs(store, classA, viewConfigA); + assertEquals(mergedViewConfig3.size(), 1); + assertEquals(mergedViewConfig3.get(classA), viewConfigA); + } + + @Test + public void testRemoveViewConfigFromStoreViewConfigMap() { + String storeName = "storeName"; + Store store = mock(Store.class); + String classA = "ClassA"; + String classB = "ClassB"; + + ViewConfig viewConfigA = mock(ViewConfig.class); + ViewConfig viewConfigB = mock(ViewConfig.class); + + Map viewConfigMap = new HashMap() { + { + put(classA, viewConfigA); + put(classB, viewConfigB); + } + }; + + doReturn(storeName).when(store).getName(); + doReturn(viewConfigMap).when(store).getViewConfigs(); + + Map newViewConfig1 = UpdateStoreUtils.removeViewConfigFromStoreViewConfigMap(store, classB); + assertEquals(newViewConfig1.size(), 1); + assertEquals(newViewConfig1.get(classA), viewConfigA); + + doReturn(null).when(store).getViewConfigs(); + Map newViewConfig2 = UpdateStoreUtils.removeViewConfigFromStoreViewConfigMap(store, classA); + assertTrue(newViewConfig2.isEmpty()); + } + + public static class PickyVenicePartitioner extends VenicePartitioner { + private static final String SCHEMA_VALID = "SCHEMA_VALID"; + + public PickyVenicePartitioner(VeniceProperties props, Schema schema) { + super(props, schema); + } + + @Override + public int getPartitionId(byte[] keyBytes, int numPartitions) { + return 0; + } + + @Override + public int getPartitionId(ByteBuffer keyByteBuffer, int numPartitions) { + return 0; + } + + @Override + protected void checkSchema(@Nonnull Schema keySchema) throws PartitionerSchemaMismatchException { + if (!props.getBoolean(SCHEMA_VALID)) { + throw new PartitionerSchemaMismatchException("Schema is not valid"); + } + } + } +} diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/utils/ParentControllerConfigUpdateUtilsTest.java b/services/venice-controller/src/test/java/com/linkedin/venice/controller/utils/ParentControllerConfigUpdateUtilsTest.java deleted 
file mode 100644 index c3963d5211..0000000000 --- a/services/venice-controller/src/test/java/com/linkedin/venice/controller/utils/ParentControllerConfigUpdateUtilsTest.java +++ /dev/null @@ -1,308 +0,0 @@ -package com.linkedin.venice.controller.utils; - -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.linkedin.venice.controller.HelixVeniceClusterResources; -import com.linkedin.venice.controller.VeniceControllerClusterConfig; -import com.linkedin.venice.controller.VeniceHelixAdmin; -import com.linkedin.venice.controller.VeniceParentHelixAdmin; -import com.linkedin.venice.controller.kafka.protocol.admin.UpdateStore; -import com.linkedin.venice.controller.util.ParentControllerConfigUpdateUtils; -import com.linkedin.venice.meta.Store; -import com.linkedin.venice.schema.SchemaEntry; -import com.linkedin.venice.utils.TestWriteUtils; -import java.util.Collections; -import java.util.Optional; -import org.testng.Assert; -import org.testng.annotations.Test; - - -public class ParentControllerConfigUpdateUtilsTest { - @Test - public void testPartialUpdateConfigUpdate() { - VeniceParentHelixAdmin parentHelixAdmin = mock(VeniceParentHelixAdmin.class); - VeniceHelixAdmin veniceHelixAdmin = mock(VeniceHelixAdmin.class); - String cluster = "foo"; - String storeName = "bar"; - Store store = mock(Store.class); - when(parentHelixAdmin.getVeniceHelixAdmin()).thenReturn(veniceHelixAdmin); - when(veniceHelixAdmin.getStore(anyString(), anyString())).thenReturn(store); - HelixVeniceClusterResources helixVeniceClusterResources = mock(HelixVeniceClusterResources.class); - VeniceControllerClusterConfig controllerConfig = mock(VeniceControllerClusterConfig.class); - when(helixVeniceClusterResources.getConfig()).thenReturn(controllerConfig); - when(veniceHelixAdmin.getHelixVeniceClusterResources(anyString())).thenReturn(helixVeniceClusterResources); - SchemaEntry schemaEntry = new SchemaEntry(1, TestWriteUtils.USER_WITH_DEFAULT_SCHEMA); - when(veniceHelixAdmin.getValueSchemas(anyString(), anyString())).thenReturn(Collections.singletonList(schemaEntry)); - when(parentHelixAdmin.getValueSchemas(anyString(), anyString())).thenReturn(Collections.singletonList(schemaEntry)); - - /** - * Explicit request. - */ - Optional partialUpdateRequest = Optional.of(true); - // Case 1: partial update config updated. - UpdateStore setStore = new UpdateStore(); - Assert.assertTrue( - ParentControllerConfigUpdateUtils.checkAndMaybeApplyPartialUpdateConfig( - parentHelixAdmin, - cluster, - storeName, - partialUpdateRequest, - setStore, - true)); - // Case 2: partial update config updated. - setStore = new UpdateStore(); - when(store.isWriteComputationEnabled()).thenReturn(true); - Assert.assertTrue( - ParentControllerConfigUpdateUtils.checkAndMaybeApplyPartialUpdateConfig( - parentHelixAdmin, - cluster, - storeName, - partialUpdateRequest, - setStore, - true)); - // Case 3: partial update config updated. - partialUpdateRequest = Optional.of(false); - when(store.isWriteComputationEnabled()).thenReturn(true); - Assert.assertTrue( - ParentControllerConfigUpdateUtils.checkAndMaybeApplyPartialUpdateConfig( - parentHelixAdmin, - cluster, - storeName, - partialUpdateRequest, - setStore, - true)); - // Case 4: partial update config updated. 
- setStore = new UpdateStore(); - when(store.isWriteComputationEnabled()).thenReturn(false); - Assert.assertTrue( - ParentControllerConfigUpdateUtils.checkAndMaybeApplyPartialUpdateConfig( - parentHelixAdmin, - cluster, - storeName, - partialUpdateRequest, - setStore, - true)); - - /** - * No request. - */ - partialUpdateRequest = Optional.empty(); - when(controllerConfig.isEnablePartialUpdateForHybridActiveActiveUserStores()).thenReturn(false); - when(controllerConfig.isEnablePartialUpdateForHybridNonActiveActiveUserStores()).thenReturn(false); - // Case 1: partial update config not updated. - setStore = new UpdateStore(); - Assert.assertFalse( - ParentControllerConfigUpdateUtils.checkAndMaybeApplyPartialUpdateConfig( - parentHelixAdmin, - cluster, - storeName, - partialUpdateRequest, - setStore, - true)); - setStore.activeActiveReplicationEnabled = true; - Assert.assertFalse( - ParentControllerConfigUpdateUtils.checkAndMaybeApplyPartialUpdateConfig( - parentHelixAdmin, - cluster, - storeName, - partialUpdateRequest, - setStore, - true)); - // Case 2: partial update config updated. - when(controllerConfig.isEnablePartialUpdateForHybridActiveActiveUserStores()).thenReturn(true); - when(controllerConfig.isEnablePartialUpdateForHybridNonActiveActiveUserStores()).thenReturn(true); - setStore = new UpdateStore(); - Assert.assertTrue( - ParentControllerConfigUpdateUtils.checkAndMaybeApplyPartialUpdateConfig( - parentHelixAdmin, - cluster, - storeName, - partialUpdateRequest, - setStore, - true)); - setStore.activeActiveReplicationEnabled = true; - Assert.assertTrue( - ParentControllerConfigUpdateUtils.checkAndMaybeApplyPartialUpdateConfig( - parentHelixAdmin, - cluster, - storeName, - partialUpdateRequest, - setStore, - true)); - } - - @Test - public void testChunkingConfigUpdate() { - VeniceParentHelixAdmin parentHelixAdmin = mock(VeniceParentHelixAdmin.class); - VeniceHelixAdmin veniceHelixAdmin = mock(VeniceHelixAdmin.class); - String cluster = "foo"; - String storeName = "bar"; - Store store = mock(Store.class); - when(parentHelixAdmin.getVeniceHelixAdmin()).thenReturn(veniceHelixAdmin); - when(veniceHelixAdmin.getStore(anyString(), anyString())).thenReturn(store); - - /** - * Explicit request. - */ - Optional chunkingRequest = Optional.of(true); - when(store.isChunkingEnabled()).thenReturn(false); - // Case 1: chunking config updated. - UpdateStore setStore = new UpdateStore(); - setStore.chunkingEnabled = false; - Assert.assertTrue( - ParentControllerConfigUpdateUtils - .checkAndMaybeApplyChunkingConfigChange(parentHelixAdmin, cluster, storeName, chunkingRequest, setStore)); - // Case 2: chunking config updated. - setStore = new UpdateStore(); - setStore.chunkingEnabled = false; - when(store.isChunkingEnabled()).thenReturn(true); - Assert.assertTrue( - ParentControllerConfigUpdateUtils - .checkAndMaybeApplyChunkingConfigChange(parentHelixAdmin, cluster, storeName, chunkingRequest, setStore)); - // Case 3: chunking config updated. - chunkingRequest = Optional.of(false); - when(store.isChunkingEnabled()).thenReturn(true); - Assert.assertTrue( - ParentControllerConfigUpdateUtils - .checkAndMaybeApplyChunkingConfigChange(parentHelixAdmin, cluster, storeName, chunkingRequest, setStore)); - // Case 4: chunking config updated. - setStore = new UpdateStore(); - when(store.isChunkingEnabled()).thenReturn(false); - Assert.assertTrue( - ParentControllerConfigUpdateUtils - .checkAndMaybeApplyChunkingConfigChange(parentHelixAdmin, cluster, storeName, chunkingRequest, setStore)); - /** - * No request. 
- */ - chunkingRequest = Optional.empty(); - when(store.isWriteComputationEnabled()).thenReturn(false); - // Case 1: already enabled, chunking config not updated. - when(store.isChunkingEnabled()).thenReturn(true); - setStore = new UpdateStore(); - setStore.writeComputationEnabled = true; - Assert.assertFalse( - ParentControllerConfigUpdateUtils - .checkAndMaybeApplyChunkingConfigChange(parentHelixAdmin, cluster, storeName, chunkingRequest, setStore)); - // Case 2: chunking config updated. - when(store.isChunkingEnabled()).thenReturn(false); - setStore = new UpdateStore(); - setStore.writeComputationEnabled = true; - Assert.assertTrue( - ParentControllerConfigUpdateUtils - .checkAndMaybeApplyChunkingConfigChange(parentHelixAdmin, cluster, storeName, chunkingRequest, setStore)); - } - - @Test - public void testRmdChunkingConfigUpdate() { - VeniceParentHelixAdmin parentHelixAdmin = mock(VeniceParentHelixAdmin.class); - VeniceHelixAdmin veniceHelixAdmin = mock(VeniceHelixAdmin.class); - String cluster = "foo"; - String storeName = "bar"; - Store store = mock(Store.class); - when(parentHelixAdmin.getVeniceHelixAdmin()).thenReturn(veniceHelixAdmin); - when(veniceHelixAdmin.getStore(anyString(), anyString())).thenReturn(store); - - /** - * Explicit request. - */ - Optional chunkingRequest = Optional.of(true); - when(store.isChunkingEnabled()).thenReturn(false); - // Case 1: chunking config updated. - UpdateStore setStore = new UpdateStore(); - setStore.chunkingEnabled = false; - Assert.assertTrue( - ParentControllerConfigUpdateUtils.checkAndMaybeApplyRmdChunkingConfigChange( - parentHelixAdmin, - cluster, - storeName, - chunkingRequest, - setStore)); - // Case 2: chunking config updated. - setStore = new UpdateStore(); - setStore.chunkingEnabled = false; - when(store.isChunkingEnabled()).thenReturn(true); - Assert.assertTrue( - ParentControllerConfigUpdateUtils.checkAndMaybeApplyRmdChunkingConfigChange( - parentHelixAdmin, - cluster, - storeName, - chunkingRequest, - setStore)); - // Case 3: chunking config updated. - chunkingRequest = Optional.of(false); - when(store.isChunkingEnabled()).thenReturn(true); - Assert.assertTrue( - ParentControllerConfigUpdateUtils.checkAndMaybeApplyRmdChunkingConfigChange( - parentHelixAdmin, - cluster, - storeName, - chunkingRequest, - setStore)); - // Case 4: chunking config updated. - setStore = new UpdateStore(); - when(store.isChunkingEnabled()).thenReturn(false); - Assert.assertTrue( - ParentControllerConfigUpdateUtils.checkAndMaybeApplyRmdChunkingConfigChange( - parentHelixAdmin, - cluster, - storeName, - chunkingRequest, - setStore)); - - /** - * No request. - */ - chunkingRequest = Optional.empty(); - when(store.isWriteComputationEnabled()).thenReturn(false); - // Case 1: already enabled, chunking config not updated. - when(store.isChunkingEnabled()).thenReturn(true); - setStore = new UpdateStore(); - setStore.writeComputationEnabled = true; - Assert.assertFalse( - ParentControllerConfigUpdateUtils.checkAndMaybeApplyRmdChunkingConfigChange( - parentHelixAdmin, - cluster, - storeName, - chunkingRequest, - setStore)); - // Case 2: chunking config not updated. - when(store.isChunkingEnabled()).thenReturn(false); - setStore = new UpdateStore(); - setStore.writeComputationEnabled = true; - setStore.activeActiveReplicationEnabled = false; - Assert.assertFalse( - ParentControllerConfigUpdateUtils.checkAndMaybeApplyRmdChunkingConfigChange( - parentHelixAdmin, - cluster, - storeName, - chunkingRequest, - setStore)); - // Case 3: chunking config not updated. 
- when(store.isChunkingEnabled()).thenReturn(false); - setStore = new UpdateStore(); - setStore.writeComputationEnabled = false; - setStore.activeActiveReplicationEnabled = true; - Assert.assertFalse( - ParentControllerConfigUpdateUtils.checkAndMaybeApplyRmdChunkingConfigChange( - parentHelixAdmin, - cluster, - storeName, - chunkingRequest, - setStore)); - // Case 4: chunking config updated. - when(store.isChunkingEnabled()).thenReturn(false); - setStore = new UpdateStore(); - setStore.writeComputationEnabled = true; - setStore.activeActiveReplicationEnabled = true; - Assert.assertTrue( - ParentControllerConfigUpdateUtils.checkAndMaybeApplyRmdChunkingConfigChange( - parentHelixAdmin, - cluster, - storeName, - chunkingRequest, - setStore)); - - } -} diff --git a/services/venice-controller/src/test/resources/superset_schema_test/v5.avsc b/services/venice-controller/src/test/resources/superset_schema_test/v5.avsc new file mode 100644 index 0000000000..7081139240 --- /dev/null +++ b/services/venice-controller/src/test/resources/superset_schema_test/v5.avsc @@ -0,0 +1,12 @@ +{ + "type" : "record", + "namespace" : "example.avro", + "name" : "ValueRecordName", + "fields" : [ + { "name" : "f0", "type" : "int", "default" : -1 }, + { "name" : "f1", "type" : "int", "default" : -1 }, + { "name" : "f2", "type" : "int", "default" : -1 }, + { "name" : "f3", "type" : "int", "default" : -1 } + ], + "custom_prop" : "custom_prop_value_for_v5" +} \ No newline at end of file diff --git a/services/venice-controller/src/test/resources/superset_schema_test/v6.avsc b/services/venice-controller/src/test/resources/superset_schema_test/v6.avsc new file mode 100644 index 0000000000..57328b34ae --- /dev/null +++ b/services/venice-controller/src/test/resources/superset_schema_test/v6.avsc @@ -0,0 +1,9 @@ +{ + "type" : "record", + "namespace" : "example.avro", + "name" : "ValueRecordName", + "fields" : [ + { "name" : "f0", "type" : "int", "default" : -1 } + ], + "custom_prop" : "custom_prop_value_for_v6" +} \ No newline at end of file From 9f3e225790efa417662fedeb9dc24a9477c08fef Mon Sep 17 00:00:00 2001 From: Nisarg Thakkar Date: Thu, 17 Oct 2024 15:02:38 -0700 Subject: [PATCH 2/2] Address review comments --- .../controller/VeniceParentHelixAdmin.java | 3 - .../controller/util/UpdateStoreUtils.java | 68 +++++++++---------- .../TestVeniceParentHelixAdmin.java | 4 +- 3 files changed, 33 insertions(+), 42 deletions(-) diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceParentHelixAdmin.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceParentHelixAdmin.java index 3cd3898f53..f9ccde2eb8 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceParentHelixAdmin.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceParentHelixAdmin.java @@ -59,13 +59,11 @@ import static com.linkedin.venice.controllerapi.ControllerApiConstants.UNUSED_SCHEMA_DELETION_ENABLED; import static com.linkedin.venice.controllerapi.ControllerApiConstants.VERSION; import static com.linkedin.venice.controllerapi.ControllerApiConstants.WRITE_COMPUTATION_ENABLED; -import static com.linkedin.venice.meta.Version.VERSION_SEPARATOR; import static com.linkedin.venice.meta.VersionStatus.ONLINE; import static com.linkedin.venice.meta.VersionStatus.PUSHED; import static com.linkedin.venice.serialization.avro.AvroProtocolDefinition.BATCH_JOB_HEARTBEAT; import static 
com.linkedin.venice.serialization.avro.AvroProtocolDefinition.PUSH_JOB_DETAILS; -import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ArrayNode; @@ -225,7 +223,6 @@ import com.linkedin.venice.utils.VeniceProperties; import com.linkedin.venice.utils.concurrent.VeniceConcurrentHashMap; import com.linkedin.venice.utils.locks.AutoCloseableLock; -import com.linkedin.venice.views.MaterializedView; import com.linkedin.venice.views.VeniceView; import com.linkedin.venice.writer.VeniceWriter; import com.linkedin.venice.writer.VeniceWriterFactory; diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreUtils.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreUtils.java index f4bbda0969..ad9569e5be 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreUtils.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreUtils.java @@ -61,7 +61,6 @@ import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_HYBRID_TIME_LAG_THRESHOLD; import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_REWIND_TIME_IN_SECONDS; import static com.linkedin.venice.meta.Version.VERSION_SEPARATOR; -import static com.linkedin.venice.utils.RegionUtils.parseRegionsFilterList; import com.fasterxml.jackson.core.JsonProcessingException; import com.linkedin.venice.common.VeniceSystemStoreUtils; @@ -101,6 +100,7 @@ import com.linkedin.venice.schema.SchemaData; import com.linkedin.venice.utils.ObjectMapperFactory; import com.linkedin.venice.utils.PartitionUtils; +import com.linkedin.venice.utils.RegionUtils; import com.linkedin.venice.utils.VeniceProperties; import com.linkedin.venice.views.MaterializedView; import com.linkedin.venice.views.VeniceView; @@ -133,29 +133,24 @@ public static UpdateStoreWrapper getStoreUpdate( boolean checkRegionFilter) { VeniceControllerMultiClusterConfig multiClusterConfigs = admin.getMultiClusterConfigs(); + // Check whether the command affects this region. + Set regionsFilter = + params.getRegionsFilter().map(RegionUtils::parseRegionsFilterList).orElse(Collections.emptySet()); + if (checkRegionFilter && !regionsFilter.isEmpty() && !regionsFilter.contains(multiClusterConfigs.getRegionName())) { + LOGGER.info( + "UpdateStore command will be skipped for store: {} in cluster: {}, because the region filter is {}" + + " which doesn't include the current region: {}", + storeName, + clusterName, + regionsFilter, + multiClusterConfigs.getRegionName()); + return null; + } + // There are certain configs that are only allowed to be updated in child regions. We might still want the ability // to update such configs in the parent region via the Admin tool for operational reasons. So, we allow such updates // if the regions filter only specifies one region, which is the parent region. - boolean onlyParentRegionFilter = false; - - // Check whether the command affects this region. 
- if (params.getRegionsFilter().isPresent()) { - Set regionsFilter = parseRegionsFilterList(params.getRegionsFilter().get()); - if (checkRegionFilter && !regionsFilter.contains(multiClusterConfigs.getRegionName())) { - LOGGER.info( - "UpdateStore command will be skipped for store: {} in cluster: {}, because the region filter is {}" - + " which doesn't include the current region: {}", - storeName, - clusterName, - regionsFilter, - multiClusterConfigs.getRegionName()); - return null; - } - - if (admin.isParent() && regionsFilter.size() == 1) { - onlyParentRegionFilter = true; - } - } + boolean onlyParentRegionFilter = admin.isParent() && regionsFilter.size() == 1; Store originalStore = admin.getStore(clusterName, storeName); if (originalStore == null) { @@ -823,11 +818,11 @@ static void validateStoreConfigs(Admin admin, String clusterName, Store store) { ErrorType.INVALID_CONFIG); } - DataReplicationPolicy dataReplicationPolicy = hybridStoreConfig.getDataReplicationPolicy(); - // Incremental push + !AA + NON_AGGREGATE DRP is not supported in multi-region mode - if (controllerConfig.isMultiRegion() && store.isIncrementalPushEnabled() - && !store.isActiveActiveReplicationEnabled() - && dataReplicationPolicy == DataReplicationPolicy.NON_AGGREGATE) { + boolean isIncrementalPushSupported = AdminUtils.isIncrementalPushSupported( + controllerConfig.isMultiRegion(), + store.isActiveActiveReplicationEnabled(), + hybridStoreConfig); + if (store.isIncrementalPushEnabled() && !isIncrementalPushSupported) { throw new VeniceHttpException( HttpStatus.SC_BAD_REQUEST, errorMessagePrefix @@ -835,6 +830,7 @@ static void validateStoreConfigs(Admin admin, String clusterName, Store store) { ErrorType.INVALID_CONFIG); } + DataReplicationPolicy dataReplicationPolicy = hybridStoreConfig.getDataReplicationPolicy(); // ACTIVE_ACTIVE DRP is only supported when activeActiveReplicationEnabled = true if (dataReplicationPolicy == DataReplicationPolicy.ACTIVE_ACTIVE && !store.isActiveActiveReplicationEnabled()) { throw new VeniceHttpException( @@ -988,7 +984,9 @@ private static void validateStoreUpdate( } } - private static Map validateAndDecorateStoreViewConfigs(Map stringMap, Store store) { + private static Map validateAndDecorateStoreViewConfigs( + Map stringMap, + Store store) { Map configs = StoreViewUtils.convertStringMapViewToViewConfigMap(stringMap); Map validatedConfigs = new HashMap<>(); for (Map.Entry viewConfigEntry: configs.entrySet()) { @@ -1124,11 +1122,10 @@ static void validateStorePartitionCountUpdate( int minPartitionNum = clusterConfig.getMinNumberOfPartitions(); if (newPartitionCount < minPartitionNum && newPartitionCount != 0) { - throw new VeniceHttpException( - HttpStatus.SC_BAD_REQUEST, - "Partition count must be at least " + minPartitionNum + " for store: " + storeName - + ". If a specific partition count is not required, set it to 0.", - ErrorType.INVALID_CONFIG); + String errorMessage = errorMessagePrefix + "Partition count must be at least " + minPartitionNum + + ". 
If a specific partition count is not required, set it to 0."; + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); } int maxPartitionNum = clusterConfig.getMaxNumberOfPartitions(); @@ -1219,12 +1216,11 @@ static PartitionerConfig mergeNewSettingsIntoOldPartitionerConfig( return oldStore.getPartitionerConfig(); } - PartitionerConfig originalPartitionerConfig; - if (oldStore.getPartitionerConfig() == null) { + PartitionerConfig originalPartitionerConfig = oldStore.getPartitionerConfig(); + if (originalPartitionerConfig == null) { originalPartitionerConfig = new PartitionerConfigImpl(); - } else { - originalPartitionerConfig = oldStore.getPartitionerConfig(); } + return new PartitionerConfigImpl( partitionerClass.orElse(originalPartitionerConfig.getPartitionerClass()), partitionerParams.orElse(originalPartitionerConfig.getPartitionerParams()), diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceParentHelixAdmin.java b/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceParentHelixAdmin.java index f49bc8118c..c0fb5648cf 100644 --- a/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceParentHelixAdmin.java +++ b/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceParentHelixAdmin.java @@ -1655,8 +1655,7 @@ public void testUpdateStore() { when(zkClient.readData(zkMetadataNodePath, null)).thenReturn(null) .thenReturn(AdminTopicMetadataAccessor.generateMetadataMap(1, -1, 1)); - UpdateStoreQueryParams storeQueryParams1 = - new UpdateStoreQueryParams().setIncrementalPushEnabled(true).setBlobTransferEnabled(true); + UpdateStoreQueryParams storeQueryParams1 = new UpdateStoreQueryParams().setBlobTransferEnabled(true); parentAdmin.initStorageCluster(clusterName); parentAdmin.updateStore(clusterName, storeName, storeQueryParams1); @@ -1676,7 +1675,6 @@ public void testUpdateStore() { assertEquals(adminMessage.operationType, AdminMessageType.UPDATE_STORE.getValue()); UpdateStore updateStore = (UpdateStore) adminMessage.payloadUnion; - assertTrue(updateStore.incrementalPushEnabled); assertTrue(updateStore.blobTransferEnabled); long readQuota = 100L;