diff --git a/docs/data-sources/backup_schedule.md b/docs/data-sources/backup_schedule.md
index bf4163de0..6314ba832 100644
--- a/docs/data-sources/backup_schedule.md
+++ b/docs/data-sources/backup_schedule.md
@@ -13,11 +13,13 @@ Listing target locations by cluster scope is supported only for clusters enabled
```terraform
data "tanzu-mission-control_backup_schedule" "demo" {
+ name = "BACKUP_SCHEDULE_NAME"
scope {
- management_cluster_name = "MGMT_CLS_NAME"
- provisioner_name = "PROVISIONER_NAME"
- cluster_name = "CLS_NAME"
- name = "TARGET_LOCATION_NAME"
+ cluster {
+ management_cluster_name = "MGMT_CLS_NAME"
+ provisioner_name = "PROVISIONER_NAME"
+ cluster_name = "CLS_NAME"
+ }
}
query = "QUERY"
@@ -32,11 +34,12 @@ data "tanzu-mission-control_backup_schedule" "demo" {
### Required
- `scope` (Block List, Min: 1, Max: 1) Search scope block (see [below for nested schema](#nestedblock--scope))
+- `name` (String) The name of the backup schedule
### Optional
- `include_total_count` (Boolean) Whether to include total count of backups.
-(Default: True)
+ (Default: True)
- `query` (String) Define a query for listing backups
- `sort_by` (String) Sort backups by field.
@@ -49,28 +52,30 @@ data "tanzu-mission-control_backup_schedule" "demo" {
### Nested Schema for `scope`
-Required:
+Optional:
-- `cluster_name` (String) Cluster name
+- `cluster` (Block List, Max: 1) Cluster scope block (see [below for nested schema](#nestedblock--scope--cluster))
-Optional:
+
+### Nested Schema for `scope.cluster`
+
+Required:
+- `cluster_name` (String) Cluster name
- `management_cluster_name` (String) Management cluster name
-- `name` (String) The name of the backup schedule
- `provisioner_name` (String) Cluster provisioner name
+
### Nested Schema for `schedules`
Read-Only:
-- `cluster_name` (String)
-- `management_cluster_name` (String)
-- `meta` (List of Object) (see [below for nested schema](#nestedobjatt--schedules--meta))
- `name` (String)
-- `provisioner_name` (String)
-- `scope` (String)
+- `backup_scope` (String)
+- `meta` (List of Object) (see [below for nested schema](#nestedobjatt--schedules--meta))
+- `scope` (List of Object) (see [below for nested schema](#nestedobjatt--schedules--scope))
- `spec` (List of Object) (see [below for nested schema](#nestedobjatt--schedules--spec))
@@ -85,6 +90,24 @@ Read-Only:
- `uid` (String)
+
+### Nested Schema for `schedules.scope`
+
+Read-Only:
+
+- `cluster` (List of Object) (see [below for nested schema](#nestedobjatt--schedules--scope--cluster))
+
+
+### Nested Schema for `schedules.scope.cluster`
+
+Read-Only:
+
+- `cluster_name` (String)
+- `management_cluster_name` (String)
+- `provisioner_name` (String)
+
+
+
### Nested Schema for `schedules.spec`
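
The reworked `scope` block in this data source also accepts a `cluster_group` alternative (see `ClusterGroupScopeKey`/`ClusterGroupNameKey` in `datasource_schema.go` further down), but none of the documented examples exercise it. A minimal sketch of that variant, assuming the same placeholder conventions as the cluster-scoped example; since both nested blocks are optional single-item blocks in the schema, presumably exactly one of `cluster` or `cluster_group` is set per scope:

```terraform
data "tanzu-mission-control_backup_schedule" "demo_cluster_group" {
  name = "BACKUP_SCHEDULE_NAME"

  scope {
    cluster_group {
      cluster_group_name = "CLUSTER_GROUP_NAME"
    }
  }

  query = "QUERY"
}
```
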
diff --git a/docs/resources/backup_schedule.md b/docs/resources/backup_schedule.md
index b67cbd201..84e93a623 100644
--- a/docs/resources/backup_schedule.md
+++ b/docs/resources/backup_schedule.md
@@ -24,11 +24,15 @@ For more information regarding scheduled backups, see [Scheduled Backups][backup
```terraform
resource "tanzu-mission-control_backup_schedule" "sample-full" {
name = "full-weekly"
- management_cluster_name = "MGMT_CLS_NAME"
- provisioner_name = "PROVISIONER_NAME"
- cluster_name = "CLS_NAME"
+ scope {
+ cluster {
+ management_cluster_name = "MGMT_CLS_NAME"
+ provisioner_name = "PROVISIONER_NAME"
+ cluster_name = "CLS_NAME"
+ }
+ }
- scope = "FULL_CLUSTER"
+ backup_scope = "FULL_CLUSTER"
spec {
schedule {
@@ -62,12 +66,15 @@ resource "tanzu-mission-control_backup_schedule" "sample-full" {
```terraform
resource "tanzu-mission-control_backup_schedule" "sample-full" {
name = "namespaces-hourly"
- management_cluster_name = "MGMT_CLS_NAME"
- provisioner_name = "PROVISIONER_NAME"
- cluster_name = "CLS_NAME"
-
- scope = "SET_NAMESPACES"
+ scope {
+ cluster {
+ management_cluster_name = "MGMT_CLS_NAME"
+ provisioner_name = "PROVISIONER_NAME"
+ cluster_name = "CLS_NAME"
+ }
+ }
+ backup_scope = "SET_NAMESPACES"
spec {
schedule {
@@ -144,11 +151,15 @@ resource "tanzu-mission-control_backup_schedule" "sample-full" {
```terraform
resource "tanzu-mission-control_backup_schedule" "sample-full" {
name = "label-based-no-storage"
- management_cluster_name = "MGMT_CLS_NAME"
- provisioner_name = "PROVISIONER_NAME"
- cluster_name = "CLS_NAME"
+ scope {
+ cluster {
+ management_cluster_name = "MGMT_CLS_NAME"
+ provisioner_name = "PROVISIONER_NAME"
+ cluster_name = "CLS_NAME"
+ }
+ }
- scope = "LABEL_SELECTOR"
+ backup_scope = "LABEL_SELECTOR"
spec {
@@ -183,12 +194,10 @@ resource "tanzu-mission-control_backup_schedule" "sample-full" {
### Required
-- `cluster_name` (String) Cluster name
-- `management_cluster_name` (String) Management cluster name
- `name` (String) The name of the backup schedule
-- `provisioner_name` (String) Cluster provisioner name
-- `scope` (String) Scope for backup schedule.
-Valid values are (FULL_CLUSTER, SET_NAMESPACES, LABEL_SELECTOR)
+- `backup_scope` (String) Scope for backup schedule.
+ Valid values are (FULL_CLUSTER, SET_NAMESPACES, LABEL_SELECTOR)
+- `scope` (Block List, Min: 1, Max: 1) Scope block for backup schedule (cluster/cluster group) (see [below for nested schema](#nestedblock--scope))
- `spec` (Block List, Min: 1, Max: 1) Backup schedule spec block (see [below for nested schema](#nestedblock--spec))
### Optional
@@ -199,6 +208,24 @@ Valid values are (FULL_CLUSTER, SET_NAMESPACES, LABEL_SELECTOR)
- `id` (String) The ID of this resource.
+
+### Nested Schema for `scope`
+
+Optional:
+
+- `cluster` (Block List, Max: 1) Cluster scope block (see [below for nested schema](#nestedblock--scope--cluster))
+
+
+### Nested Schema for `scope.cluster`
+
+Required:
+
+- `cluster_name` (String) Cluster name
+- `management_cluster_name` (String) Management cluster name
+- `provisioner_name` (String) Cluster provisioner name
+
+
+
### Nested Schema for `spec`
@@ -226,42 +253,42 @@ Optional:
- `backup_ttl` (String) The backup retention period.
- `csi_snapshot_timeout` (String) Specifies the time used to wait for CSI VolumeSnapshot status turns to ReadyToUse during creation, before returning error as timeout.
-The default value is 10 minute.
-Format is the time number and time sign, example: "50s" (50 seconds)
+ The default value is 10 minute.
+ Format is the time number and time sign, example: "50s" (50 seconds)
- `default_volumes_to_fs_backup` (Boolean) Specifies whether all pod volumes should be backed up via file system backup by default.
-(Default: True)
+ (Default: True)
- `default_volumes_to_restic` (Boolean) Specifies whether restic should be used to take a backup of all pod volumes by default.
-(Default: False)
+ (Default: False)
- `excluded_namespaces` (List of String) The namespaces to be excluded in the backup.
-Can't be used if scope is SET_NAMESPACES.
+ Can't be used if scope is SET_NAMESPACES.
- `excluded_resources` (List of String) The name list for the resources to be excluded in backup.
- `hooks` (Block List, Max: 1) Hooks block represent custom actions that should be executed at different phases of the backup. (see [below for nested schema](#nestedblock--spec--template--hooks))
- `include_cluster_resources` (Boolean) A flag which specifies whether cluster-scoped resources should be included for consideration in the backup.
-If set to true, all cluster-scoped resources will be backed up. If set to false, all cluster-scoped resources will be excluded from the backup.
-If unset, all cluster-scoped resources are included if and only if all namespaces are included and there are no excluded namespaces.
-Otherwise, only cluster-scoped resources associated with namespace-scoped resources included in the backup spec are backed up.
-For example, if a PersistentVolumeClaim is included in the backup, its associated PersistentVolume (which is cluster-scoped) would also be backed up.
-(Default: False)
+ If set to true, all cluster-scoped resources will be backed up. If set to false, all cluster-scoped resources will be excluded from the backup.
+ If unset, all cluster-scoped resources are included if and only if all namespaces are included and there are no excluded namespaces.
+ Otherwise, only cluster-scoped resources associated with namespace-scoped resources included in the backup spec are backed up.
+ For example, if a PersistentVolumeClaim is included in the backup, its associated PersistentVolume (which is cluster-scoped) would also be backed up.
+ (Default: False)
- `included_namespaces` (List of String) The namespace to be included for backup from.
-If empty, all namespaces are included.
-Can't be used if scope is FULL_CLUSTER.
-Required if scope is SET_NAMESPACES.
+ If empty, all namespaces are included.
+ Can't be used if scope is FULL_CLUSTER.
+ Required if scope is SET_NAMESPACES.
- `included_resources` (List of String) The name list for the resources to be included into backup. If empty, all resources are included.
- `label_selector` (Block List, Max: 1) The label selector to selectively adding individual objects to the backup schedule.
-If not specified, all objects are included.
-Can't be used if scope is FULL_CLUSTER or SET_NAMESPACES.
-Required if scope is LABEL_SELECTOR and Or Label Selectors are not defined (see [below for nested schema](#nestedblock--spec--template--label_selector))
+ If not specified, all objects are included.
+ Can't be used if scope is FULL_CLUSTER or SET_NAMESPACES.
+ Required if scope is LABEL_SELECTOR and Or Label Selectors are not defined (see [below for nested schema](#nestedblock--spec--template--label_selector))
- `or_label_selector` (Block List) (Repeatable Block) A list of label selectors to filter with when adding individual objects to the backup.
-If multiple provided they will be joined by the OR operator.
-LabelSelector as well as OrLabelSelectors cannot co-exist in backup request, only one of them can be used.
-Can't be used if scope is FULL_CLUSTER or SET_NAMESPACES.
-Required if scope is LABEL_SELECTOR and Label Selector is not defined (see [below for nested schema](#nestedblock--spec--template--or_label_selector))
+ If multiple provided they will be joined by the OR operator.
+ LabelSelector as well as OrLabelSelectors cannot co-exist in backup request, only one of them can be used.
+ Can't be used if scope is FULL_CLUSTER or SET_NAMESPACES.
+ Required if scope is LABEL_SELECTOR and Label Selector is not defined (see [below for nested schema](#nestedblock--spec--template--or_label_selector))
- `ordered_resources` (Map of String) Specifies the backup order of resources of specific Kind. The map key is the Kind name and value is a list of resource names separated by commas.
-Each resource name has format "namespace/resourcename".
-For cluster resources, simply use "resourcename".
+ Each resource name has format "namespace/resourcename".
+ For cluster resources, simply use "resourcename".
- `snapshot_volumes` (Boolean) A flag which specifies whether to take cloud snapshots of any PV's referenced in the set of objects included in the Backup.
-If set to true, snapshots will be taken, otherwise, snapshots will be skipped.
-If left unset, snapshots will be attempted if volume snapshots are configured for the cluster.
+ If set to true, snapshots will be taken, otherwise, snapshots will be skipped.
+ If left unset, snapshots will be attempted if volume snapshots are configured for the cluster.
- `storage_location` (String) The name of a BackupStorageLocation where the backup should be stored.
- `volume_snapshot_locations` (List of String) A list containing names of VolumeSnapshotLocations associated with this backup.
@@ -287,13 +314,13 @@ Optional:
- `excluded_namespaces` (List of String) Specifies the namespaces to which this hook spec does not apply.
- `included_namespaces` (List of String) Specifies the namespaces to which this hook spec applies.
-If empty, it applies to all namespaces.
+ If empty, it applies to all namespaces.
- `label_selector` (Block List, Max: 1) The label selector to selectively adding individual objects to the hook resource.
-If not specified, all objects are included. (see [below for nested schema](#nestedblock--spec--template--hooks--resource--label_selector))
+ If not specified, all objects are included. (see [below for nested schema](#nestedblock--spec--template--hooks--resource--label_selector))
- `post_hook` (Block List) (Repeatable Block) A list of backup hooks to execute after storing the item in the backup.
-These are executed after all "additional items" from item actions are processed. (see [below for nested schema](#nestedblock--spec--template--hooks--resource--post_hook))
+ These are executed after all "additional items" from item actions are processed. (see [below for nested schema](#nestedblock--spec--template--hooks--resource--post_hook))
- `pre_hook` (Block List) (Repeatable Block) A list of backup hooks to execute after storing the item in the backup.
-These are executed after all "additional items" from item actions are processed. (see [below for nested schema](#nestedblock--spec--template--hooks--resource--pre_hook))
+ These are executed after all "additional items" from item actions are processed. (see [below for nested schema](#nestedblock--spec--template--hooks--resource--pre_hook))
### Nested Schema for `spec.template.hooks.resource.label_selector`
@@ -302,7 +329,7 @@ Optional:
- `match_expression` (Block List) (Repeatable Block) A list of label selector requirements. The requirements are ANDed. (see [below for nested schema](#nestedblock--spec--template--hooks--resource--label_selector--match_expression))
- `match_labels` (Map of String) A map of {key,value} pairs. A single {key,value} in the map is equivalent to an element of match_expressions, whose key field is "key", the operator is "In" and the values array contains only "value".
-The requirements are ANDed.
+ The requirements are ANDed.
### Nested Schema for `spec.template.hooks.resource.label_selector.match_expression`
@@ -311,14 +338,14 @@ Required:
- `key` (String) Key is the label key that the selector applies to.
- `operator` (String) Operator represents a key's relationship to a set of values.
-Valid operators are "In", "NotIn", "Exists" and "DoesNotExist".
+ Valid operators are "In", "NotIn", "Exists" and "DoesNotExist".
Optional:
- `values` (List of String) Values is an array of string values.
-If the operator is "In" or "NotIn", the values array must be non-empty.
-If the operator is "Exists" or "DoesNotExist", the values array must be empty.
-This array is replaced during a strategic merge patch.
+ If the operator is "In" or "NotIn", the values array must be non-empty.
+ If the operator is "Exists" or "DoesNotExist", the values array must be empty.
+ This array is replaced during a strategic merge patch.
@@ -336,12 +363,12 @@ Required:
- `command` (List of String) The command and arguments to execute.
- `container` (String) The container in the pod where the command should be executed.
-If not specified, the pod's first container is used.
+ If not specified, the pod's first container is used.
Optional:
- `on_error` (String) Specifies how Velero should behave if it encounters an error executing this hook.
-Valid values are (FAIL, CONTINUE)
+ Valid values are (FAIL, CONTINUE)
- `timeout` (String) Defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure.
@@ -360,12 +387,12 @@ Required:
- `command` (List of String) The command and arguments to execute.
- `container` (String) The container in the pod where the command should be executed.
-If not specified, the pod's first container is used.
+ If not specified, the pod's first container is used.
Optional:
- `on_error` (String) Specifies how Velero should behave if it encounters an error executing this hook.
-Valid values are (FAIL, CONTINUE)
+ Valid values are (FAIL, CONTINUE)
- `timeout` (String) Defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure.
@@ -379,7 +406,7 @@ Optional:
- `match_expression` (Block List) (Repeatable Block) A list of label selector requirements. The requirements are ANDed. (see [below for nested schema](#nestedblock--spec--template--label_selector--match_expression))
- `match_labels` (Map of String) A map of {key,value} pairs. A single {key,value} in the map is equivalent to an element of match_expressions, whose key field is "key", the operator is "In" and the values array contains only "value".
-The requirements are ANDed.
+ The requirements are ANDed.
### Nested Schema for `spec.template.label_selector.match_expression`
@@ -388,14 +415,14 @@ Required:
- `key` (String) Key is the label key that the selector applies to.
- `operator` (String) Operator represents a key's relationship to a set of values.
-Valid operators are "In", "NotIn", "Exists" and "DoesNotExist".
+ Valid operators are "In", "NotIn", "Exists" and "DoesNotExist".
Optional:
- `values` (List of String) Values is an array of string values.
-If the operator is "In" or "NotIn", the values array must be non-empty.
-If the operator is "Exists" or "DoesNotExist", the values array must be empty.
-This array is replaced during a strategic merge patch.
+ If the operator is "In" or "NotIn", the values array must be non-empty.
+ If the operator is "Exists" or "DoesNotExist", the values array must be empty.
+ This array is replaced during a strategic merge patch.
@@ -406,7 +433,7 @@ Optional:
- `match_expression` (Block List) (Repeatable Block) A list of label selector requirements. The requirements are ANDed. (see [below for nested schema](#nestedblock--spec--template--or_label_selector--match_expression))
- `match_labels` (Map of String) A map of {key,value} pairs. A single {key,value} in the map is equivalent to an element of match_expressions, whose key field is "key", the operator is "In" and the values array contains only "value".
-The requirements are ANDed.
+ The requirements are ANDed.
### Nested Schema for `spec.template.or_label_selector.match_expression`
@@ -415,14 +442,14 @@ Required:
- `key` (String) Key is the label key that the selector applies to.
- `operator` (String) Operator represents a key's relationship to a set of values.
-Valid operators are "In", "NotIn", "Exists" and "DoesNotExist".
+ Valid operators are "In", "NotIn", "Exists" and "DoesNotExist".
Optional:
- `values` (List of String) Values is an array of string values.
-If the operator is "In" or "NotIn", the values array must be non-empty.
-If the operator is "Exists" or "DoesNotExist", the values array must be empty.
-This array is replaced during a strategic merge patch.
+ If the operator is "In" or "NotIn", the values array must be non-empty.
+ If the operator is "Exists" or "DoesNotExist", the values array must be empty.
+ This array is replaced during a strategic merge patch.
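
The resource schema gains the same `cluster_group` scope block (see `resource_schema.go` below), but the three documented examples all use the `cluster` scope. A hypothetical cluster-group-scoped schedule, with the `spec` contents elided because they are unchanged from the cluster-scoped examples; the resource name and placeholder values are illustrative only:

```terraform
resource "tanzu-mission-control_backup_schedule" "sample-cg-full" {
  name = "full-weekly"

  scope {
    cluster_group {
      cluster_group_name = "CLUSTER_GROUP_NAME"
    }
  }

  backup_scope = "FULL_CLUSTER"

  spec {
    schedule {
      # Schedule settings are the same as in the cluster-scoped examples above.
    }

    template {
      storage_location = "TARGET_LOCATION_NAME"
    }
  }
}
```
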
diff --git a/examples/data-sources/backupschedule/data_source_backup_schedule.tf b/examples/data-sources/backupschedule/data_source_backup_schedule.tf
index f793f5757..2e93a0598 100644
--- a/examples/data-sources/backupschedule/data_source_backup_schedule.tf
+++ b/examples/data-sources/backupschedule/data_source_backup_schedule.tf
@@ -1,9 +1,11 @@
data "tanzu-mission-control_backup_schedule" "demo" {
+ name = "BACKUP_SCHEDULE_NAME"
scope {
- management_cluster_name = "MGMT_CLS_NAME"
- provisioner_name = "PROVISIONER_NAME"
- cluster_name = "CLS_NAME"
- name = "TARGET_LOCATION_NAME"
+ cluster {
+ management_cluster_name = "MGMT_CLS_NAME"
+ provisioner_name = "PROVISIONER_NAME"
+ cluster_name = "CLS_NAME"
+ }
}
query = "QUERY"
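
Because `schedules` now exposes `scope` as a nested list of objects and the flat `scope` string has become `backup_scope` (per the data source doc changes above), downstream references change shape as well. A sketch of how outputs might index into the new structure, assuming at least one schedule is returned and it is cluster-scoped; the output names are hypothetical:

```terraform
output "first_schedule_cluster_name" {
  value = data.tanzu-mission-control_backup_schedule.demo.schedules[0].scope[0].cluster[0].cluster_name
}

output "first_schedule_backup_scope" {
  value = data.tanzu-mission-control_backup_schedule.demo.schedules[0].backup_scope
}
```
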
diff --git a/examples/resources/backupschedule/resource_backup_schedule_full_cluster.tf b/examples/resources/backupschedule/resource_backup_schedule_full_cluster.tf
index 12ddf80f1..3e22c20ff 100644
--- a/examples/resources/backupschedule/resource_backup_schedule_full_cluster.tf
+++ b/examples/resources/backupschedule/resource_backup_schedule_full_cluster.tf
@@ -1,10 +1,14 @@
resource "tanzu-mission-control_backup_schedule" "sample-full" {
name = "full-weekly"
- management_cluster_name = "MGMT_CLS_NAME"
- provisioner_name = "PROVISIONER_NAME"
- cluster_name = "CLS_NAME"
+ scope {
+ cluster {
+ management_cluster_name = "MGMT_CLS_NAME"
+ provisioner_name = "PROVISIONER_NAME"
+ cluster_name = "CLS_NAME"
+ }
+ }
- scope = "FULL_CLUSTER"
+ backup_scope = "FULL_CLUSTER"
spec {
schedule {
diff --git a/examples/resources/backupschedule/resource_backup_schedule_labels.tf b/examples/resources/backupschedule/resource_backup_schedule_labels.tf
index e725a7d92..4f74f22dd 100644
--- a/examples/resources/backupschedule/resource_backup_schedule_labels.tf
+++ b/examples/resources/backupschedule/resource_backup_schedule_labels.tf
@@ -1,10 +1,14 @@
resource "tanzu-mission-control_backup_schedule" "sample-full" {
name = "label-based-no-storage"
- management_cluster_name = "MGMT_CLS_NAME"
- provisioner_name = "PROVISIONER_NAME"
- cluster_name = "CLS_NAME"
+ scope {
+ cluster {
+ management_cluster_name = "MGMT_CLS_NAME"
+ provisioner_name = "PROVISIONER_NAME"
+ cluster_name = "CLS_NAME"
+ }
+ }
- scope = "LABEL_SELECTOR"
+ backup_scope = "LABEL_SELECTOR"
spec {
diff --git a/examples/resources/backupschedule/resource_backup_schedule_namespaces.tf b/examples/resources/backupschedule/resource_backup_schedule_namespaces.tf
index 97976da8a..7170b608c 100644
--- a/examples/resources/backupschedule/resource_backup_schedule_namespaces.tf
+++ b/examples/resources/backupschedule/resource_backup_schedule_namespaces.tf
@@ -1,11 +1,14 @@
resource "tanzu-mission-control_backup_schedule" "sample-full" {
name = "namespaces-hourly"
- management_cluster_name = "MGMT_CLS_NAME"
- provisioner_name = "PROVISIONER_NAME"
- cluster_name = "CLS_NAME"
-
- scope = "SET_NAMESPACES"
+ scope {
+ cluster {
+ management_cluster_name = "MGMT_CLS_NAME"
+ provisioner_name = "PROVISIONER_NAME"
+ cluster_name = "CLS_NAME"
+ }
+ }
+ backup_scope = "SET_NAMESPACES"
spec {
schedule {
diff --git a/internal/resources/cluster/backupschedule/converter_mapping.go b/internal/resources/cluster/backupschedule/converter_mapping.go
index 82759a562..86c654f54 100644
--- a/internal/resources/cluster/backupschedule/converter_mapping.go
+++ b/internal/resources/cluster/backupschedule/converter_mapping.go
@@ -20,11 +20,18 @@ var (
)
var tfModelResourceMap = &tfModelConverterHelper.BlockToStruct{
- NameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "name"),
- ClusterNameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "clusterName"),
- ManagementClusterNameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "managementClusterName"),
- ProvisionerNameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "provisionerName"),
- common.MetaKey: common.GetMetaConverterMap(tfModelConverterHelper.DefaultModelPathSeparator),
+ NameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "name"),
+ ScopeKey: &tfModelConverterHelper.BlockToStruct{
+ ClusterGroupScopeKey: &tfModelConverterHelper.BlockToStruct{
+ ClusterGroupNameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "clusterGroupName"),
+ },
+ ClusterScopeKey: &tfModelConverterHelper.BlockToStruct{
+ ClusterNameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "clusterName"),
+ ManagementClusterNameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "managementClusterName"),
+ ProvisionerNameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "provisionerName"),
+ },
+ },
+ common.MetaKey: common.GetMetaConverterMap(tfModelConverterHelper.DefaultModelPathSeparator),
SpecKey: &tfModelConverterHelper.BlockToStruct{
PausedKey: tfModelConverterHelper.BuildDefaultModelPath("spec", "paused"),
ScheduleKey: &tfModelConverterHelper.BlockToStruct{
@@ -121,11 +128,16 @@ var tfModelDataSourceRequestMap = &tfModelConverterHelper.BlockToStruct{
SortByKey: "sortBy",
QueryKey: "query",
IncludeTotalCountKey: "includeTotal",
+ NameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "name"),
ScopeKey: &tfModelConverterHelper.BlockToStruct{
- ClusterNameKey: tfModelConverterHelper.BuildDefaultModelPath("searchScope", "clusterName"),
- ManagementClusterNameKey: tfModelConverterHelper.BuildDefaultModelPath("searchScope", "managementClusterName"),
- ProvisionerNameKey: tfModelConverterHelper.BuildDefaultModelPath("searchScope", "provisionerName"),
- NameKey: tfModelConverterHelper.BuildDefaultModelPath("searchScope", "name"),
+ ClusterGroupScopeKey: &tfModelConverterHelper.BlockToStruct{
+ ClusterGroupNameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "clusterGroupName"),
+ },
+ ClusterScopeKey: &tfModelConverterHelper.BlockToStruct{
+ ClusterNameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "clusterName"),
+ ManagementClusterNameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "managementClusterName"),
+ ProvisionerNameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "provisionerName"),
+ },
},
}
diff --git a/internal/resources/cluster/backupschedule/datasource_schema.go b/internal/resources/cluster/backupschedule/datasource_schema.go
index 5d46c127e..eee54c873 100644
--- a/internal/resources/cluster/backupschedule/datasource_schema.go
+++ b/internal/resources/cluster/backupschedule/datasource_schema.go
@@ -16,80 +16,115 @@ const (
IncludeTotalCountKey = "include_total_count"
SchedulesKey = "schedules"
TotalCountKey = "total_count"
+ ScopeKey = "scope"
+ ClusterScopeKey = "cluster"
+ ClusterGroupScopeKey = "cluster_group"
+ ClusterGroupNameKey = "cluster_group_name"
)
-var backupScheduleDataSourceSchema = map[string]*schema.Schema{
- ScopeKey: searchScopeSchema,
- SortByKey: sortBySchema,
- QueryKey: querySchema,
- IncludeTotalCountKey: includeTotalSchema,
- SchedulesKey: schedulesSchema,
- TotalCountKey: totalCountSchema,
-}
+var (
+ nameDSSchema = &schema.Schema{
+ Type: schema.TypeString,
+ Description: "The name of the backup schedule",
+ Required: true,
+ ForceNew: true,
+ }
-var searchScopeSchema = &schema.Schema{
- Type: schema.TypeList,
- Description: "Search scope block",
- MaxItems: 1,
- Required: true,
- Elem: &schema.Resource{
- Schema: map[string]*schema.Schema{
- ClusterNameKey: clusterNameSchema,
- ManagementClusterNameKey: managementClusterNameDSSchema,
- ProvisionerNameKey: provisionerNameDSSchema,
- NameKey: nameDSSchema,
- },
- },
-}
+ managementClusterNameDSSchema = &schema.Schema{
+ Type: schema.TypeString,
+ Description: "Management cluster name",
+ Required: true,
+ ForceNew: true,
+ }
-var nameDSSchema = &schema.Schema{
- Type: schema.TypeString,
- Description: "The name of the backup schedule",
- Optional: true,
-}
+ provisionerNameDSSchema = &schema.Schema{
+ Type: schema.TypeString,
+ Description: "Cluster provisioner name",
+ Required: true,
+ ForceNew: true,
+ }
-var managementClusterNameDSSchema = &schema.Schema{
- Type: schema.TypeString,
- Description: "Management cluster name",
- Optional: true,
-}
+ sortBySchema = &schema.Schema{
+ Type: schema.TypeString,
+ Description: "Sort backups by field.",
+ Optional: true,
+ }
-var provisionerNameDSSchema = &schema.Schema{
- Type: schema.TypeString,
- Description: "Cluster provisioner name",
- Optional: true,
-}
+ querySchema = &schema.Schema{
+ Type: schema.TypeString,
+ Description: "Define a query for listing backups",
+ Optional: true,
+ }
-var sortBySchema = &schema.Schema{
- Type: schema.TypeString,
- Description: "Sort backups by field.",
- Optional: true,
-}
+ includeTotalSchema = &schema.Schema{
+ Type: schema.TypeBool,
+ Description: "Whether to include total count of backups.\n(Default: True)",
+ Optional: true,
+ Default: true,
+ }
-var querySchema = &schema.Schema{
- Type: schema.TypeString,
- Description: "Define a query for listing backups",
- Optional: true,
-}
+ schedulesSchema = &schema.Schema{
+ Type: schema.TypeList,
+ Description: "A list of schedules returned",
+ Computed: true,
+ Elem: &schema.Resource{
+ Schema: backupScheduleResourceSchema,
+ },
+ }
-var includeTotalSchema = &schema.Schema{
- Type: schema.TypeBool,
- Description: "Whether to include total count of backups.\n(Default: True)",
- Optional: true,
- Default: true,
-}
+ totalCountSchema = &schema.Schema{
+ Type: schema.TypeString,
+ Description: "Total count of schedules returned",
+ Computed: true,
+ }
-var schedulesSchema = &schema.Schema{
+ backupScheduleDataSourceSchema = map[string]*schema.Schema{
+ NameKey: nameDSSchema,
+ ScopeKey: searchScopeSchema,
+ SortByKey: sortBySchema,
+ QueryKey: querySchema,
+ IncludeTotalCountKey: includeTotalSchema,
+ SchedulesKey: schedulesSchema,
+ TotalCountKey: totalCountSchema,
+ }
+)
+
+var searchScopeSchema = &schema.Schema{
Type: schema.TypeList,
- Description: "A list of schedules returned",
- Computed: true,
+ Description: "Search scope block",
+ MaxItems: 1,
+ Required: true,
Elem: &schema.Resource{
- Schema: backupScheduleResourceSchema,
+ Schema: map[string]*schema.Schema{
+ ClusterGroupScopeKey: {
+ Type: schema.TypeList,
+ Optional: true,
+ Description: "Cluster group scope block",
+ MaxItems: 1,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ ClusterGroupNameKey: {
+ Type: schema.TypeString,
+ Description: "Cluster group name",
+ Required: true,
+ ForceNew: true,
+ },
+ },
+ },
+ },
+ ClusterScopeKey: {
+ Type: schema.TypeList,
+ Optional: true,
+ Description: "Cluster scope block",
+ MaxItems: 1,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ ClusterNameKey: clusterNameSchema,
+ ManagementClusterNameKey: managementClusterNameDSSchema,
+ ProvisionerNameKey: provisionerNameDSSchema,
+ },
+ },
+ },
+ },
},
}
-
-var totalCountSchema = &schema.Schema{
- Type: schema.TypeString,
- Description: "Total count of schedules returned",
- Computed: true,
-}
diff --git a/internal/resources/cluster/backupschedule/resource_backup_schedule.go b/internal/resources/cluster/backupschedule/resource_backup_schedule.go
index d46fccd4a..d3e62e9e5 100644
--- a/internal/resources/cluster/backupschedule/resource_backup_schedule.go
+++ b/internal/resources/cluster/backupschedule/resource_backup_schedule.go
@@ -42,7 +42,7 @@ func resourceBackupScheduleCreate(ctx context.Context, data *schema.ResourceData
return diag.FromErr(errors.Wrapf(err, "Couldn't create Tanzu Mission Control backup schedule."))
}
- diags = validateSchema(model, BackupScope(data.Get(ScopeKey).(string)))
+ diags = validateSchema(model, BackupScope(data.Get(BackupScopeKey).(string)))
if diags.HasError() {
return diags
@@ -66,13 +66,20 @@ func resourceBackupScheduleRead(ctx context.Context, data *schema.ResourceData,
var resp *backupschedulemodels.VmwareTanzuManageV1alpha1ClusterDataprotectionScheduleResponse
config := m.(authctx.TanzuContext)
- model, err := tfModelResourceConverter.ConvertTFSchemaToAPIModel(data, []string{NameKey, ClusterNameKey, ManagementClusterNameKey, ProvisionerNameKey})
+ model, err := tfModelResourceConverter.ConvertTFSchemaToAPIModel(data, []string{ScopeKey, ClusterScopeKey, ClusterNameKey, ManagementClusterNameKey, ProvisionerNameKey})
if err != nil {
return diag.FromErr(errors.Wrapf(err, "Couldn't read Tanzu Mission Control backup schedule."))
}
backupScheduleFn := model.FullName
+
+ if name, ok := data.GetOk(NameKey); ok {
+ backupScheduleFn.Name = name.(string)
+ } else {
+ return diag.Errorf("Couldn't read Tanzu Mission Control backup schedule name.")
+ }
+
resp, err = readResourceWait(ctx, &config, backupScheduleFn)
if err != nil {
@@ -119,13 +126,20 @@ func resourceBackupScheduleRead(ctx context.Context, data *schema.ResourceData,
func resourceBackupScheduleDelete(ctx context.Context, data *schema.ResourceData, m interface{}) (diags diag.Diagnostics) {
config := m.(authctx.TanzuContext)
- model, err := tfModelResourceConverter.ConvertTFSchemaToAPIModel(data, []string{NameKey, ClusterNameKey, ManagementClusterNameKey, ProvisionerNameKey})
+ model, err := tfModelResourceConverter.ConvertTFSchemaToAPIModel(data, []string{ScopeKey, ClusterScopeKey, ClusterNameKey, ManagementClusterNameKey, ProvisionerNameKey})
if err != nil {
return diag.FromErr(errors.Wrapf(err, "Couldn't delete Tanzu Mission Control backup schedule."))
}
backupScheduleFn := model.FullName
+
+ if name, ok := data.GetOk(NameKey); ok {
+ backupScheduleFn.Name = name.(string)
+ } else {
+ return diag.Errorf("Couldn't read Tanzu Mission Control backup schedule name.")
+ }
+
err = config.TMCConnection.BackupScheduleService.BackupScheduleResourceServiceDelete(backupScheduleFn)
if err != nil && !clienterrors.IsNotFoundError(err) {
@@ -144,7 +158,7 @@ func resourceBackupScheduleUpdate(ctx context.Context, data *schema.ResourceData
return diag.FromErr(errors.Wrapf(err, "Couldn't update Tanzu Mission Control backup schedule."))
}
- diags = validateSchema(model, BackupScope(data.Get(ScopeKey).(string)))
+ diags = validateSchema(model, BackupScope(data.Get(BackupScopeKey).(string)))
if diags.HasError() {
return diags
diff --git a/internal/resources/cluster/backupschedule/resource_schema.go b/internal/resources/cluster/backupschedule/resource_schema.go
index 3cc57cec1..41e47d409 100644
--- a/internal/resources/cluster/backupschedule/resource_schema.go
+++ b/internal/resources/cluster/backupschedule/resource_schema.go
@@ -33,7 +33,7 @@ const (
SpecKey = "spec"
ProvisionerNameKey = "provisioner_name"
ManagementClusterNameKey = "management_cluster_name"
- ScopeKey = "scope"
+ BackupScopeKey = "backup_scope"
// Spec Directive Keys.
PausedKey = "paused"
@@ -96,14 +96,53 @@ var (
}
)
+var scopeSchema = &schema.Schema{
+ Type: schema.TypeList,
+ Description: "Scope block for Back up schedule (cluster/cluster group)",
+ Required: true,
+ MaxItems: 1,
+ Optional: false,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ ClusterGroupScopeKey: {
+ Type: schema.TypeList,
+ Optional: true,
+ Description: "Cluster group scope block",
+ MaxItems: 1,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ ClusterGroupNameKey: {
+ Type: schema.TypeString,
+ Description: "Cluster group name",
+ Required: true,
+ ForceNew: true,
+ },
+ },
+ },
+ },
+ ClusterScopeKey: {
+ Type: schema.TypeList,
+ Optional: true,
+ Description: "Cluster scope block",
+ MaxItems: 1,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ ClusterNameKey: clusterNameSchema,
+ ManagementClusterNameKey: managementClusterNameSchema,
+ ProvisionerNameKey: provisionerNameSchema,
+ },
+ },
+ },
+ },
+ },
+}
+
var backupScheduleResourceSchema = map[string]*schema.Schema{
- NameKey: nameSchema,
- ManagementClusterNameKey: managementClusterNameSchema,
- ProvisionerNameKey: provisionerNameSchema,
- ClusterNameKey: clusterNameSchema,
- ScopeKey: backupScopeSchema,
- SpecKey: specSchema,
- common.MetaKey: common.Meta,
+ NameKey: nameSchema,
+ ScopeKey: scopeSchema,
+ BackupScopeKey: backupScopeSchema,
+ SpecKey: specSchema,
+ common.MetaKey: common.Meta,
}
var nameSchema = &schema.Schema{
diff --git a/internal/resources/cluster/backupschedule/tests/datasource_tf_configs.go b/internal/resources/cluster/backupschedule/tests/datasource_tf_configs.go
index c4c289fc1..042790e7e 100644
--- a/internal/resources/cluster/backupschedule/tests/datasource_tf_configs.go
+++ b/internal/resources/cluster/backupschedule/tests/datasource_tf_configs.go
@@ -52,9 +52,11 @@ func (builder *DataSourceTFConfigBuilder) GetDataSourceConfig() string {
%s
data "%s" "%s" {
+ name = "%s"
scope {
- %s
- name = "%s"
+ cluster {
+ %s
+ }
}
depends_on = [%s]
@@ -63,7 +65,7 @@ func (builder *DataSourceTFConfigBuilder) GetDataSourceConfig() string {
builder.BackupScheduleRequiredResource,
backupscheduleres.ResourceName,
DataSourceName,
- builder.ClusterInfo,
LabelsBackupScheduleName,
+ builder.ClusterInfo,
LabelsBackupScheduleResourceFullName)
}
diff --git a/internal/resources/cluster/backupschedule/tests/resource_tf_configs.go b/internal/resources/cluster/backupschedule/tests/resource_tf_configs.go
index e82c6dd60..7bfe1fa17 100644
--- a/internal/resources/cluster/backupschedule/tests/resource_tf_configs.go
+++ b/internal/resources/cluster/backupschedule/tests/resource_tf_configs.go
@@ -104,9 +104,14 @@ func (builder *ResourceTFConfigBuilder) GetFullClusterBackupScheduleConfig() str
%s
resource "%s" "%s" {
- name = "%s"
- scope = "%s"
- %s
+ name = "%s"
+ scope {
+ cluster {
+ %s
+ }
+ }
+
+ backup_scope = "%s"
spec {
schedule {
@@ -137,8 +142,8 @@ func (builder *ResourceTFConfigBuilder) GetFullClusterBackupScheduleConfig() str
backupscheduleres.ResourceName,
FullClusterBackupScheduleResourceName,
FullClusterBackupScheduleName,
- backupscheduleres.FullClusterBackupScope,
builder.ClusterInfo,
+ backupscheduleres.FullClusterBackupScope,
builder.TargetLocationInfo,
dataprotectiontests.EnableDataProtectionResourceFullName,
targetlocationtests.TmcManagedResourceFullName)
@@ -151,9 +156,14 @@ func (builder *ResourceTFConfigBuilder) GetNamespacesBackupScheduleConfig() stri
%s
resource "%s" "%s" {
- name = "%s"
- scope = "%s"
- %s
+ name = "%s"
+ scope {
+ cluster {
+ %s
+ }
+ }
+
+ backup_scope = "%s"
spec {
schedule {
@@ -222,8 +232,8 @@ func (builder *ResourceTFConfigBuilder) GetNamespacesBackupScheduleConfig() stri
backupscheduleres.ResourceName,
NamespacesBackupScheduleResourceName,
NamespacesBackupScheduleName,
- backupscheduleres.NamespacesBackupScope,
builder.ClusterInfo,
+ backupscheduleres.NamespacesBackupScope,
builder.TargetLocationInfo,
dataprotectiontests.EnableDataProtectionResourceFullName,
targetlocationtests.TmcManagedResourceFullName)
@@ -236,9 +246,14 @@ func (builder *ResourceTFConfigBuilder) GetLabelsBackupScheduleConfig() string {
%s
resource "%s" "%s" {
- name = "%s"
- scope = "%s"
- %s
+ name = "%s"
+ scope {
+ cluster {
+ %s
+ }
+ }
+
+ backup_scope = "%s"
spec {
schedule {
@@ -271,8 +286,8 @@ func (builder *ResourceTFConfigBuilder) GetLabelsBackupScheduleConfig() string {
backupscheduleres.ResourceName,
LabelsBackupScheduleResourceName,
LabelsBackupScheduleName,
- backupscheduleres.LabelSelectorBackupScope,
builder.ClusterInfo,
+ backupscheduleres.LabelSelectorBackupScope,
builder.TargetLocationInfo,
dataprotectiontests.EnableDataProtectionResourceFullName,
targetlocationtests.TmcManagedResourceFullName)
diff --git a/internal/resources/cluster/dataprotection/tests/resource_tf_configs.go b/internal/resources/cluster/dataprotection/tests/resource_tf_configs.go
index 7a36997f3..f95c102ac 100644
--- a/internal/resources/cluster/dataprotection/tests/resource_tf_configs.go
+++ b/internal/resources/cluster/dataprotection/tests/resource_tf_configs.go
@@ -66,7 +66,11 @@ func (builder *ResourceTFConfigBuilder) GetEnableDataProtectionConfig() string {
%s
resource "%s" "%s" {
- %s
+ scope {
+ cluster {
+ %s
+ }
+ }
}
`,
builder.ClusterRequiredResource,