diff --git a/design/pv_backup_info.md b/design/pv_backup_info.md index 771f8255c2..01f520c4be 100644 --- a/design/pv_backup_info.md +++ b/design/pv_backup_info.md @@ -93,7 +93,8 @@ type PodVolumeBackupInfo struct { UploaderType string // The type of the uploader that uploads the data. The valid values are `kopia` and `restic`. It's useful for file-system backup and snapshot data mover. VolumeName string // The PVC's corresponding volume name used by Pod: https://github.com/kubernetes/kubernetes/blob/e4b74dd12fa8cb63c174091d5536a10b8ec19d34/pkg/apis/core/types.go#L48 - PodName string // The Pod name mounting this PVC. The format should be /. + PodName string // The Pod name mounting this PVC. + PodNamespace string // The Pod namespace. NodeName string // The PVB-taken k8s node's name. } diff --git a/pkg/backup/backup_test.go b/pkg/backup/backup_test.go index e9c15dbc46..6760d627b1 100644 --- a/pkg/backup/backup_test.go +++ b/pkg/backup/backup_test.go @@ -71,6 +71,7 @@ func TestBackedUpItemsMatchesTarballContents(t *testing.T) { req := &Request{ Backup: defaultBackup().Result(), SkippedPVTracker: NewSkipPVTracker(), + PVMap: map[string]PvcPvInfo{}, } backupFile := bytes.NewBuffer([]byte{}) @@ -84,8 +85,8 @@ func TestBackedUpItemsMatchesTarballContents(t *testing.T) { builder.ForDeployment("zoo", "raz").Result(), ), test.PVs( - builder.ForPersistentVolume("bar").Result(), - builder.ForPersistentVolume("baz").Result(), + builder.ForPersistentVolume("bar").ClaimRef("foo", "pvc1").Result(), + builder.ForPersistentVolume("baz").ClaimRef("bar", "pvc2").Result(), ), } for _, resource := range apiResources { diff --git a/pkg/backup/item_backupper.go b/pkg/backup/item_backupper.go index 61f6834d60..ae8074521b 100644 --- a/pkg/backup/item_backupper.go +++ b/pkg/backup/item_backupper.go @@ -250,6 +250,10 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti namespace = metadata.GetNamespace() if groupResource == kuberesource.PersistentVolumes { + 
if err := ib.addVolumeInfo(obj, log); err != nil { + backupErrs = append(backupErrs, err) + } + if err := ib.takePVSnapshot(obj, log); err != nil { backupErrs = append(backupErrs, err) } @@ -685,6 +689,39 @@ func (ib *itemBackupper) unTrackSkippedPV(obj runtime.Unstructured, groupResourc } } +func (ib *itemBackupper) addVolumeInfo(obj runtime.Unstructured, log logrus.FieldLogger) error { + pv := new(corev1api.PersistentVolume) + err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pv) + if err != nil { + log.WithError(err).Warnf("Fail to convert PV") + return err + } + + if ib.backupRequest.PVMap == nil { + ib.backupRequest.PVMap = make(map[string]PvcPvInfo) + } + + pvcName := "" + pvcNamespace := "" + if pv.Spec.ClaimRef != nil { + pvcName = pv.Spec.ClaimRef.Name + pvcNamespace = pv.Spec.ClaimRef.Namespace + + ib.backupRequest.PVMap[pvcNamespace+"/"+pvcName] = PvcPvInfo{ + PVCName: pvcName, + PVCNamespace: pvcNamespace, + PV: *pv, + } + } + + ib.backupRequest.PVMap[pv.Name] = PvcPvInfo{ + PVCName: pvcName, + PVCNamespace: pvcNamespace, + PV: *pv, + } + return nil +} + // convert the input object to PV/PVC and get the PV name func getPVName(obj runtime.Unstructured, groupResource schema.GroupResource) (string, error) { if groupResource == kuberesource.PersistentVolumes { diff --git a/pkg/backup/item_backupper_test.go b/pkg/backup/item_backupper_test.go index 7bd7548bc7..481092a0ca 100644 --- a/pkg/backup/item_backupper_test.go +++ b/pkg/backup/item_backupper_test.go @@ -19,10 +19,12 @@ package backup import ( "testing" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/runtime/schema" "github.com/vmware-tanzu/velero/pkg/kuberesource" + "github.com/vmware-tanzu/velero/pkg/volume" "github.com/stretchr/testify/assert" corev1api "k8s.io/api/core/v1" @@ -237,3 +239,55 @@ func TestRandom(t *testing.T) { err2 := runtime.DefaultUnstructuredConverter.FromUnstructured(o, pvc) t.Logf("err1: %v, 
err2: %v", err1, err2) } + +func TestAddVolumeInfo(t *testing.T) { + tests := []struct { + name string + pv *corev1api.PersistentVolume + expectedVolumeInfo map[string]PvcPvInfo + }{ + { + name: "PV has ClaimRef", + pv: builder.ForPersistentVolume("testPV").ClaimRef("testNS", "testPVC").Result(), + expectedVolumeInfo: map[string]PvcPvInfo{ + "testPV": { + PVCName: "testPVC", + PVCNamespace: "testNS", + PV: *builder.ForPersistentVolume("testPV").ClaimRef("testNS", "testPVC").Result(), + }, + "testNS/testPVC": { + PVCName: "testPVC", + PVCNamespace: "testNS", + PV: *builder.ForPersistentVolume("testPV").ClaimRef("testNS", "testPVC").Result(), + }, + }, + }, + { + name: "PV has no ClaimRef", + pv: builder.ForPersistentVolume("testPV").Result(), + expectedVolumeInfo: map[string]PvcPvInfo{ + "testPV": { + PVCName: "", + PVCNamespace: "", + PV: *builder.ForPersistentVolume("testPV").Result(), + }, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ib := itemBackupper{} + ib.backupRequest = new(Request) + ib.backupRequest.VolumeInfos.VolumeInfos = make([]volume.VolumeInfo, 0) + + pvObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pv) + require.NoError(t, err) + logger := logrus.StandardLogger() + + err = ib.addVolumeInfo(&unstructured.Unstructured{Object: pvObj}, logger) + require.NoError(t, err) + require.Equal(t, tc.expectedVolumeInfo, ib.backupRequest.PVMap) + }) + } +} diff --git a/pkg/backup/pv_skip_tracker.go b/pkg/backup/pv_skip_tracker.go index 859456a374..64241a2406 100644 --- a/pkg/backup/pv_skip_tracker.go +++ b/pkg/backup/pv_skip_tracker.go @@ -10,6 +10,14 @@ type SkippedPV struct { Reasons []PVSkipReason `json:"reasons"` } +func (s *SkippedPV) SerializeSkipReasons() string { + ret := "" + for _, reason := range s.Reasons { + ret = ret + reason.Approach + ": " + reason.Reason + ";" + } + return ret +} + type PVSkipReason struct { Approach string `json:"approach"` Reason string `json:"reason"` diff --git 
a/pkg/backup/pv_skip_tracker_test.go b/pkg/backup/pv_skip_tracker_test.go index 9fdcb034f5..16de8f555c 100644 --- a/pkg/backup/pv_skip_tracker_test.go +++ b/pkg/backup/pv_skip_tracker_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestSummary(t *testing.T) { @@ -41,3 +42,14 @@ func TestSummary(t *testing.T) { } assert.Equal(t, expected, tracker.Summary()) } + +func TestSerializeSkipReasons(t *testing.T) { + tracker := NewSkipPVTracker() + //tracker.Track("pv5", "", "skipped due to policy") + tracker.Track("pv3", podVolumeApproach, "it's set to opt-out") + tracker.Track("pv3", csiSnapshotApproach, "not applicable for CSI ") + + for _, skippedPV := range tracker.Summary() { + require.Equal(t, "csiSnapshot: not applicable for CSI ;podvolume: it's set to opt-out;", skippedPV.SerializeSkipReasons()) + } +} diff --git a/pkg/backup/request.go b/pkg/backup/request.go index 44bc5578f2..594f419efc 100644 --- a/pkg/backup/request.go +++ b/pkg/backup/request.go @@ -20,6 +20,9 @@ import ( "fmt" "sort" + snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" + corev1api "k8s.io/api/core/v1" + "github.com/vmware-tanzu/velero/internal/hook" "github.com/vmware-tanzu/velero/internal/resourcepolicies" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" @@ -49,9 +52,20 @@ type Request struct { VolumeSnapshots []*volume.Snapshot PodVolumeBackups []*velerov1api.PodVolumeBackup BackedUpItems map[itemKey]struct{} + CSISnapshots []snapshotv1api.VolumeSnapshot itemOperationsList *[]*itemoperation.BackupOperation ResPolicies *resourcepolicies.Policies SkippedPVTracker *skipPVTracker + // A map contains the backup-included PV detail content. 
+ // The key is PV name or PVC name(The format is PVC-namespace/PVC-name) + PVMap map[string]PvcPvInfo + VolumeInfos volume.VolumeInfos +} + +type PvcPvInfo struct { + PVCName string + PVCNamespace string + PV corev1api.PersistentVolume } // GetItemOperationsList returns ItemOperationsList, initializing it if necessary diff --git a/pkg/backup/snapshots.go b/pkg/backup/snapshots.go index a5c6597051..e9724b9e33 100644 --- a/pkg/backup/snapshots.go +++ b/pkg/backup/snapshots.go @@ -4,7 +4,6 @@ import ( "context" snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/sets" kbclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -17,25 +16,23 @@ import ( // Common function to update the status of CSI snapshots // returns VolumeSnapshot, VolumeSnapshotContent, VolumeSnapshotClasses referenced -func UpdateBackupCSISnapshotsStatus(client kbclient.Client, volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister, backup *velerov1api.Backup, backupLog logrus.FieldLogger) (volumeSnapshots []snapshotv1api.VolumeSnapshot, volumeSnapshotContents []snapshotv1api.VolumeSnapshotContent, volumeSnapshotClasses []snapshotv1api.VolumeSnapshotClass) { +func UpdateBackupCSISnapshotsStatus(client kbclient.Client, globalCRClient kbclient.Client, backup *velerov1api.Backup, backupLog logrus.FieldLogger) (volumeSnapshots []snapshotv1api.VolumeSnapshot, volumeSnapshotContents []snapshotv1api.VolumeSnapshotContent, volumeSnapshotClasses []snapshotv1api.VolumeSnapshotClass) { if boolptr.IsSetToTrue(backup.Spec.SnapshotMoveData) { backupLog.Info("backup SnapshotMoveData is set to true, skip VolumeSnapshot resource persistence.") } else if features.IsEnabled(velerov1api.CSIFeatureFlag) { selector := label.NewSelectorForBackup(backup.Name) vscList := &snapshotv1api.VolumeSnapshotContentList{} - 
if volumeSnapshotLister != nil { - tmpVSs, err := volumeSnapshotLister.List(label.NewSelectorForBackup(backup.Name)) - if err != nil { - backupLog.Error(err) - } - for _, vs := range tmpVSs { - volumeSnapshots = append(volumeSnapshots, *vs) - } + vsList := new(snapshotv1api.VolumeSnapshotList) + err := globalCRClient.List(context.TODO(), vsList, &kbclient.ListOptions{ + LabelSelector: label.NewSelectorForBackup(backup.Name), + }) + if err != nil { + backupLog.Error(err) } + volumeSnapshots = append(volumeSnapshots, vsList.Items...) - err := client.List(context.Background(), vscList, &kbclient.ListOptions{LabelSelector: selector}) - if err != nil { + if err := client.List(context.Background(), vscList, &kbclient.ListOptions{LabelSelector: selector}); err != nil { backupLog.Error(err) } if len(vscList.Items) >= 0 { diff --git a/pkg/builder/volume_snapshot_builder.go b/pkg/builder/volume_snapshot_builder.go index bbaedd16ef..0abc48d2ac 100644 --- a/pkg/builder/volume_snapshot_builder.go +++ b/pkg/builder/volume_snapshot_builder.go @@ -18,6 +18,7 @@ package builder import ( snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -68,7 +69,21 @@ func (v *VolumeSnapshotBuilder) BoundVolumeSnapshotContentName(vscName string) * return v } +// SourcePVC set the built VolumeSnapshot's spec.Source.PersistentVolumeClaimName. func (v *VolumeSnapshotBuilder) SourcePVC(name string) *VolumeSnapshotBuilder { v.object.Spec.Source.PersistentVolumeClaimName = &name return v } + +// RestoreSize set the built VolumeSnapshot's status.RestoreSize. +func (v *VolumeSnapshotBuilder) RestoreSize(size string) *VolumeSnapshotBuilder { + resourceSize := resource.MustParse(size) + v.object.Status.RestoreSize = &resourceSize + return v +} + +// VolumeSnapshotClass set the built VolumeSnapshot's spec.VolumeSnapshotClassName value. 
+func (v *VolumeSnapshotBuilder) VolumeSnapshotClass(name string) *VolumeSnapshotBuilder { + v.object.Spec.VolumeSnapshotClassName = &name + return v +} diff --git a/pkg/builder/volume_snapshot_content_builder.go b/pkg/builder/volume_snapshot_content_builder.go index 734eeedf3d..bbfbe5477f 100644 --- a/pkg/builder/volume_snapshot_content_builder.go +++ b/pkg/builder/volume_snapshot_content_builder.go @@ -59,6 +59,7 @@ func (v *VolumeSnapshotContentBuilder) DeletionPolicy(policy snapshotv1api.Delet return v } +// VolumeSnapshotRef sets the built VolumeSnapshotContent's spec.VolumeSnapshotRef value. func (v *VolumeSnapshotContentBuilder) VolumeSnapshotRef(namespace, name string) *VolumeSnapshotContentBuilder { v.object.Spec.VolumeSnapshotRef = v1.ObjectReference{ APIVersion: "snapshot.storage.k8s.io/v1", @@ -68,3 +69,18 @@ func (v *VolumeSnapshotContentBuilder) VolumeSnapshotRef(namespace, name string) } return v } + +// VolumeSnapshotClassName sets the built VolumeSnapshotContent's spec.VolumeSnapshotClassName value. +func (v *VolumeSnapshotContentBuilder) VolumeSnapshotClassName(name string) *VolumeSnapshotContentBuilder { + v.object.Spec.VolumeSnapshotClassName = &name + return v +} + +// ObjectMeta applies functional options to the VolumeSnapshotContent's ObjectMeta. 
+func (v *VolumeSnapshotContentBuilder) ObjectMeta(opts ...ObjectMetaOpt) *VolumeSnapshotContentBuilder { + for _, opt := range opts { + opt(v.object) + } + + return v +} diff --git a/pkg/client/factory.go b/pkg/client/factory.go index 9ff2040c65..9fcb097fbd 100644 --- a/pkg/client/factory.go +++ b/pkg/client/factory.go @@ -24,6 +24,7 @@ import ( k8scheme "k8s.io/client-go/kubernetes/scheme" kbclient "sigs.k8s.io/controller-runtime/pkg/client" + snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" "github.com/pkg/errors" "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/runtime" @@ -158,6 +159,9 @@ func (f *factory) KubebuilderClient() (kbclient.Client, error) { if err := apiextv1.AddToScheme(scheme); err != nil { return nil, err } + if err := snapshotv1api.AddToScheme(scheme); err != nil { + return nil, err + } kubebuilderClient, err := kbclient.New(clientConfig, kbclient.Options{ Scheme: scheme, }) diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go index fb9d96cb38..3d91f9474e 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -23,21 +23,16 @@ import ( "net/http" "net/http/pprof" "os" - "reflect" "strings" "time" logrusr "github.com/bombsimon/logrusr/v3" snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - snapshotv1client "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" - snapshotv1informers "github.com/kubernetes-csi/external-snapshotter/client/v4/informers/externalversions" - snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/sirupsen/logrus" "github.com/spf13/cobra" corev1api "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -244,15 
+239,17 @@ func NewCommand(f client.Factory) *cobra.Command { } type server struct { - namespace string - metricsAddress string - kubeClientConfig *rest.Config - kubeClient kubernetes.Interface - discoveryClient discovery.DiscoveryInterface - discoveryHelper velerodiscovery.Helper - dynamicClient dynamic.Interface - csiSnapshotClient *snapshotv1client.Clientset - csiSnapshotLister snapshotv1listers.VolumeSnapshotLister + namespace string + metricsAddress string + kubeClientConfig *rest.Config + kubeClient kubernetes.Interface + discoveryClient discovery.DiscoveryInterface + discoveryHelper velerodiscovery.Helper + dynamicClient dynamic.Interface + // controller-runtime client. the difference from the controller-manager's client + // is that the controller-manager's client is limited to list namespace-scoped + // resources in the namespace where Velero is installed, or the cluster-scoped + // resources. The crClient doesn't have the limitation. crClient ctrlclient.Client ctx context.Context cancelFunc context.CancelFunc @@ -399,23 +396,6 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s featureVerifier: featureVerifier, } - // Setup CSI snapshot client and lister - var csiSnapClient *snapshotv1client.Clientset - if features.IsEnabled(velerov1api.CSIFeatureFlag) { - csiSnapClient, err = snapshotv1client.NewForConfig(clientConfig) - if err != nil { - cancelFunc() - return nil, err - } - s.csiSnapshotClient = csiSnapClient - - s.csiSnapshotLister, err = s.getCSIVolumeSnapshotListers() - if err != nil { - cancelFunc() - return nil, err - } - } - return s, nil } @@ -615,40 +595,6 @@ func (s *server) initRepoManager() error { return nil } -func (s *server) getCSIVolumeSnapshotListers() (vsLister snapshotv1listers.VolumeSnapshotLister, err error) { - _, err = s.discoveryClient.ServerResourcesForGroupVersion(snapshotv1api.SchemeGroupVersion.String()) - switch { - case apierrors.IsNotFound(err): - // CSI is enabled, but the required
CRDs aren't installed, so halt. - s.logger.Warnf("The '%s' feature flag was specified, but CSI API group [%s] was not found.", velerov1api.CSIFeatureFlag, snapshotv1api.SchemeGroupVersion.String()) - err = nil - case err == nil: - wrapper := NewCSIInformerFactoryWrapper(s.csiSnapshotClient) - - s.logger.Debug("Creating CSI listers") - // Access the wrapped factory directly here since we've already done the feature flag check above to know it's safe. - vsLister = wrapper.factory.Snapshot().V1().VolumeSnapshots().Lister() - - // start the informers & and wait for the caches to sync - wrapper.Start(s.ctx.Done()) - s.logger.Info("Waiting for informer caches to sync") - csiCacheSyncResults := wrapper.WaitForCacheSync(s.ctx.Done()) - s.logger.Info("Done waiting for informer caches to sync") - - for informer, synced := range csiCacheSyncResults { - if !synced { - err = errors.Errorf("cache was not synced for informer %v", informer) - return - } - s.logger.WithField("informer", informer).Info("Informer cache synced") - } - case err != nil: - s.logger.Errorf("fail to find snapshot v1 schema: %s", err) - } - - return vsLister, err -} - func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string) error { s.logger.Info("Starting controllers") @@ -775,10 +721,10 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string s.metrics, backupStoreGetter, s.config.formatFlag.Parse(), - s.csiSnapshotLister, s.credentialFileStore, s.config.maxConcurrentK8SConnections, s.config.defaultSnapshotMoveData, + s.crClient, ).SetupWithManager(s.mgr); err != nil { s.logger.Fatal(err, "unable to create controller", "controller", controller.Backup) } @@ -837,7 +783,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string cmd.CheckError(err) r := controller.NewBackupFinalizerReconciler( s.mgr.GetClient(), - s.csiSnapshotLister, + s.crClient, clock.RealClock{}, backupper, newPluginManager, @@ -1027,37 +973,6 @@ func (s 
*server) runProfiler() { } } -// CSIInformerFactoryWrapper is a proxy around the CSI SharedInformerFactory that checks the CSI feature flag before performing operations. -type CSIInformerFactoryWrapper struct { - factory snapshotv1informers.SharedInformerFactory -} - -func NewCSIInformerFactoryWrapper(c snapshotv1client.Interface) *CSIInformerFactoryWrapper { - // If no namespace is specified, all namespaces are watched. - // This is desirable for VolumeSnapshots, as we want to query for all VolumeSnapshots across all namespaces using this informer - w := &CSIInformerFactoryWrapper{} - - if features.IsEnabled(velerov1api.CSIFeatureFlag) { - w.factory = snapshotv1informers.NewSharedInformerFactoryWithOptions(c, 0) - } - return w -} - -// Start proxies the Start call to the CSI SharedInformerFactory. -func (w *CSIInformerFactoryWrapper) Start(stopCh <-chan struct{}) { - if features.IsEnabled(velerov1api.CSIFeatureFlag) { - w.factory.Start(stopCh) - } -} - -// WaitForCacheSync proxies the WaitForCacheSync call to the CSI SharedInformerFactory. 
-func (w *CSIInformerFactoryWrapper) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { - if features.IsEnabled(velerov1api.CSIFeatureFlag) { - return w.factory.WaitForCacheSync(stopCh) - } - return nil -} - // if there is a restarting during the reconciling of backups/restores/etc, these CRs may be stuck in progress status // markInProgressCRsFailed tries to mark the in progress CRs as failed when starting the server to avoid the issue func markInProgressCRsFailed(ctx context.Context, cfg *rest.Config, scheme *runtime.Scheme, namespace string, log logrus.FieldLogger) { diff --git a/pkg/controller/backup_controller.go b/pkg/controller/backup_controller.go index 746c9d7890..ad95e6e225 100644 --- a/pkg/controller/backup_controller.go +++ b/pkg/controller/backup_controller.go @@ -21,12 +21,11 @@ import ( "context" "fmt" "os" + "strconv" "strings" "time" snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" - snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" corev1api "k8s.io/api/core/v1" @@ -43,14 +42,17 @@ import ( "github.com/vmware-tanzu/velero/internal/resourcepolicies" "github.com/vmware-tanzu/velero/internal/storage" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" pkgbackup "github.com/vmware-tanzu/velero/pkg/backup" "github.com/vmware-tanzu/velero/pkg/discovery" "github.com/vmware-tanzu/velero/pkg/features" + "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/label" "github.com/vmware-tanzu/velero/pkg/metrics" "github.com/vmware-tanzu/velero/pkg/persistence" "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt" "github.com/vmware-tanzu/velero/pkg/plugin/framework" + 
"github.com/vmware-tanzu/velero/pkg/plugin/velero" "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/util/collections" "github.com/vmware-tanzu/velero/pkg/util/encode" @@ -84,11 +86,10 @@ type backupReconciler struct { metrics *metrics.ServerMetrics backupStoreGetter persistence.ObjectBackupStoreGetter formatFlag logging.Format - volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister - volumeSnapshotClient snapshotterClientSet.Interface credentialFileStore credentials.FileStore maxConcurrentK8SConnections int defaultSnapshotMoveData bool + globalCRClient kbclient.Client } func NewBackupReconciler( @@ -110,10 +111,10 @@ func NewBackupReconciler( metrics *metrics.ServerMetrics, backupStoreGetter persistence.ObjectBackupStoreGetter, formatFlag logging.Format, - volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister, credentialStore credentials.FileStore, maxConcurrentK8SConnections int, defaultSnapshotMoveData bool, + globalCRClient kbclient.Client, ) *backupReconciler { b := &backupReconciler{ ctx: ctx, @@ -135,10 +136,10 @@ func NewBackupReconciler( metrics: metrics, backupStoreGetter: backupStoreGetter, formatFlag: formatFlag, - volumeSnapshotLister: volumeSnapshotLister, credentialFileStore: credentialStore, maxConcurrentK8SConnections: maxConcurrentK8SConnections, defaultSnapshotMoveData: defaultSnapshotMoveData, + globalCRClient: globalCRClient, } b.updateTotalBackupMetric() return b @@ -317,6 +318,7 @@ func (b *backupReconciler) prepareBackupRequest(backup *velerov1api.Backup, logg request := &pkgbackup.Request{ Backup: backup.DeepCopy(), // don't modify items in the cache SkippedPVTracker: pkgbackup.NewSkipPVTracker(), + PVMap: map[string]pkgbackup.PvcPvInfo{}, } // set backup major version - deprecated, use Status.FormatVersion @@ -665,7 +667,7 @@ func (b *backupReconciler) runBackup(backup *pkgbackup.Request) error { backup.Status.VolumeSnapshotsCompleted++ } } - volumeSnapshots, volumeSnapshotContents, 
volumeSnapshotClasses := pkgbackup.UpdateBackupCSISnapshotsStatus(b.kbClient, b.volumeSnapshotLister, backup.Backup, backupLog) + volumeSnapshots, volumeSnapshotContents, volumeSnapshotClasses := pkgbackup.UpdateBackupCSISnapshotsStatus(b.kbClient, b.globalCRClient, backup.Backup, backupLog) // Iterate over backup item operations and update progress. // Any errors on operations at this point should be added to backup errors. @@ -734,6 +736,12 @@ func (b *backupReconciler) runBackup(backup *pkgbackup.Request) error { if logFile, err := backupLog.GetPersistFile(); err != nil { fatalErrs = append(fatalErrs, errors.Wrap(err, "error getting backup log file")) } else { + var errors []error + backup.VolumeInfos.VolumeInfos, errors = generateVolumeInfo(backup, volumeSnapshotContents, volumeSnapshotClasses, b.globalCRClient, backupLog) + if len(errors) > 0 { + fatalErrs = append(fatalErrs, errors...) + } + if errs := persistBackup(backup, backupFile, logFile, backupStore, volumeSnapshots, volumeSnapshotContents, volumeSnapshotClasses, results); len(errs) > 0 { fatalErrs = append(fatalErrs, errs...) } @@ -796,7 +804,6 @@ func persistBackup(backup *pkgbackup.Request, ) []error { persistErrs := []error{} backupJSON := new(bytes.Buffer) - volumeInfos := make([]volume.VolumeInfo, 0) if err := encode.To(backup.Backup, "json", backupJSON); err != nil { persistErrs = append(persistErrs, errors.Wrap(err, "error encoding backup")) @@ -843,7 +850,7 @@ func persistBackup(backup *pkgbackup.Request, persistErrs = append(persistErrs, errs...) } - volumeInfoJSON, errs := encode.ToJSONGzip(volumeInfos, "backup volumes information") + volumeInfoJSON, errs := encode.ToJSONGzip(backup.VolumeInfos, "backup volumes information") if errs != nil { persistErrs = append(persistErrs, errs...) 
} @@ -908,3 +915,298 @@ func oldAndNewFilterParametersUsedTogether(backupSpec velerov1api.BackupSpec) bo return haveOldResourceFilterParameters && haveNewResourceFilterParameters } + +func generateVolumeInfo(backup *pkgbackup.Request, csiVolumeSnapshotContents []snapshotv1api.VolumeSnapshotContent, + csiVolumesnapshotClasses []snapshotv1api.VolumeSnapshotClass, crClient kbclient.Client, logger logrus.FieldLogger) ([]volume.VolumeInfo, []error) { + volumeInfos := make([]volume.VolumeInfo, 0) + errors := make([]error, 0) + + volumeInfos = append(volumeInfos, generateVolumeInfoForSkippedPV(backup)...) + + nativeSnapshotVolumeInfos, nativeSnapshotErrs := generateVolumeInfoForVeleroNativeSnapshot(backup) + volumeInfos = append(volumeInfos, nativeSnapshotVolumeInfos...) + errors = append(errors, nativeSnapshotErrs...) + + csiVolumeInfos, csiErrs := generateVolumeInfoForCSIVolumeSnapshot(backup, csiVolumeSnapshotContents, csiVolumesnapshotClasses) + volumeInfos = append(volumeInfos, csiVolumeInfos...) + errors = append(errors, csiErrs...) + + // Go through the PodVolumeBackups. + pvbVolumeInfos, pvbErrs := generateVolumeInfoFromPVB(*backup, crClient, logger) + volumeInfos = append(volumeInfos, pvbVolumeInfos...) + errors = append(errors, pvbErrs...) + + dataUploadVolumeInfos, duErrs := generateVolumeInfoFromDataUpload(backup, csiVolumesnapshotClasses, crClient) + volumeInfos = append(volumeInfos, dataUploadVolumeInfos...) + errors = append(errors, duErrs...) + + return volumeInfos, errors +} + +// generateVolumeInfoForSkippedPV generate VolumeInfos for SkippedPV. 
+func generateVolumeInfoForSkippedPV(backup *pkgbackup.Request) []volume.VolumeInfo { + tmpVolumeInfos := make([]volume.VolumeInfo, 0) + + for _, skippedPV := range backup.SkippedPVTracker.Summary() { + pvcPVInfo := backup.PVMap[skippedPV.Name] + volumeInfo := volume.VolumeInfo{ + PVCName: pvcPVInfo.PVCName, + PVCNamespace: pvcPVInfo.PVCNamespace, + PVName: skippedPV.Name, + SnapshotDataMoved: false, + Skipped: true, + SkippedReason: skippedPV.SerializeSkipReasons(), + PVInfo: volume.PVInfo{ + ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy), + Labels: pvcPVInfo.PV.Labels, + }, + } + + tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo) + } + + return tmpVolumeInfos +} + +// generateVolumeInfoForVeleroNativeSnapshot generate VolumeInfos for Velero native snapshot +func generateVolumeInfoForVeleroNativeSnapshot(backup *pkgbackup.Request) ([]volume.VolumeInfo, []error) { + tmpVolumeInfos := make([]volume.VolumeInfo, 0) + errors := make([]error, 0) + + for _, nativeSnapshot := range backup.VolumeSnapshots { + if nativeSnapshot.Spec.VolumeIOPS == nil { + errors = append(errors, fmt.Errorf("PV %s's Velero native snapshot's IOPS pointer is nil", nativeSnapshot.Spec.PersistentVolumeName)) + continue + } + pvcPVInfo := backup.PVMap[nativeSnapshot.Spec.PersistentVolumeName] + volumeInfo := volume.VolumeInfo{ + BackupMethod: volume.NativeSnapshot, + PVCName: pvcPVInfo.PVCName, + PVCNamespace: pvcPVInfo.PVCNamespace, + PVName: pvcPVInfo.PV.Name, + SnapshotDataMoved: false, + Skipped: false, + NativeSnapshotInfo: volume.NativeSnapshotInfo{ + SnapshotHandle: nativeSnapshot.Status.ProviderSnapshotID, + VolumeType: nativeSnapshot.Spec.VolumeType, + VolumeAZ: nativeSnapshot.Spec.VolumeAZ, + IOPS: strconv.FormatInt(*nativeSnapshot.Spec.VolumeIOPS, 10), + }, + PVInfo: volume.PVInfo{ + ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy), + Labels: pvcPVInfo.PV.Labels, + }, + } + + tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo) + } + + 
return tmpVolumeInfos, errors + } + + // generateVolumeInfoForCSIVolumeSnapshot generate VolumeInfos for CSI VolumeSnapshot + func generateVolumeInfoForCSIVolumeSnapshot(backup *pkgbackup.Request, csiVolumeSnapshotContents []snapshotv1api.VolumeSnapshotContent, + csiVolumesnapshotClasses []snapshotv1api.VolumeSnapshotClass) ([]volume.VolumeInfo, []error) { + tmpVolumeInfos := make([]volume.VolumeInfo, 0) + errors := make([]error, 0) + + for _, volumeSnapshot := range backup.CSISnapshots { + var volumeSnapshotClass *snapshotv1api.VolumeSnapshotClass + var volumeSnapshotContent *snapshotv1api.VolumeSnapshotContent + for index := range csiVolumesnapshotClasses { + if volumeSnapshot.Spec.VolumeSnapshotClassName == nil { + continue + } + + if *volumeSnapshot.Spec.VolumeSnapshotClassName == csiVolumesnapshotClasses[index].Name { + volumeSnapshotClass = &csiVolumesnapshotClasses[index] + } + } + + for index := range csiVolumeSnapshotContents { + if volumeSnapshot.Status == nil || volumeSnapshot.Status.BoundVolumeSnapshotContentName == nil { + continue + } + + if *volumeSnapshot.Status.BoundVolumeSnapshotContentName == csiVolumeSnapshotContents[index].Name { + volumeSnapshotContent = &csiVolumeSnapshotContents[index] + } + } + + if volumeSnapshotClass == nil || volumeSnapshotContent == nil { + errors = append(errors, fmt.Errorf("fail to get VolumeSnapshotContent or VolumeSnapshotClass for VolumeSnapshot: %s/%s", + volumeSnapshot.Namespace, volumeSnapshot.Name)) + continue + } + + if volumeSnapshot.Spec.Source.PersistentVolumeClaimName == nil { + errors = append(errors, fmt.Errorf("VolumeSnapshot: %s/%s doesn't have a source PVC", + volumeSnapshot.Namespace, volumeSnapshot.Name)) + continue + } + + var size int64 + if volumeSnapshot.Status.RestoreSize != nil { + size = volumeSnapshot.Status.RestoreSize.Value() + } + snapshotHandle := "" + if volumeSnapshotContent.Status.SnapshotHandle != nil { + snapshotHandle = *volumeSnapshotContent.Status.SnapshotHandle + } + pvcPVInfo :=
backup.PVMap[*volumeSnapshot.Spec.Source.PersistentVolumeClaimName] + volumeInfo := volume.VolumeInfo{ + BackupMethod: volume.CSISnapshot, + PVCName: pvcPVInfo.PVCName, + PVCNamespace: pvcPVInfo.PVCNamespace, + PVName: pvcPVInfo.PV.Name, + Skipped: false, + SnapshotDataMoved: false, + CSISnapshotInfo: volume.CSISnapshotInfo{ + VSCName: *volumeSnapshot.Status.BoundVolumeSnapshotContentName, + Size: size, + Driver: volumeSnapshotClass.Driver, + SnapshotHandle: snapshotHandle, + }, + PVInfo: volume.PVInfo{ + ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy), + Labels: pvcPVInfo.PV.Labels, + }, + } + + tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo) + } + + return tmpVolumeInfos, errors +} + +// generateVolumeInfoFromPVB generate VolumeInfo for PVB. +func generateVolumeInfoFromPVB(backup pkgbackup.Request, crClient kbclient.Client, logger logrus.FieldLogger) ([]volume.VolumeInfo, []error) { + tmpVolumeInfos := make([]volume.VolumeInfo, 0) + errors := make([]error, 0) + + for _, pvb := range backup.PodVolumeBackups { + volumeInfo := volume.VolumeInfo{ + BackupMethod: volume.PodVolumeBackup, + SnapshotDataMoved: false, + Skipped: false, + StartTimestamp: pvb.Status.StartTimestamp, + PVBInfo: volume.PodVolumeBackupInfo{ + SnapshotHandle: pvb.Status.SnapshotID, + Size: pvb.Status.Progress.TotalBytes, + UploaderType: pvb.Spec.UploaderType, + VolumeName: pvb.Spec.Volume, + PodName: pvb.Spec.Pod.Name, + PodNamespace: pvb.Spec.Pod.Namespace, + NodeName: pvb.Spec.Node, + }, + } + + pod := new(corev1api.Pod) + pvcName := "" + err := crClient.Get(context.TODO(), kbclient.ObjectKey{Namespace: pvb.Spec.Pod.Namespace, Name: pvb.Spec.Pod.Name}, pod) + if err != nil { + logger.WithError(err).Warn("Fail to get pod for PodVolumeBackup: ", pvb.Name) + errors = append(errors, fmt.Errorf("fail to get pod for PVB %s/%s: %s", pvb.Namespace, pvb.Name, err.Error())) + continue + } + for _, volume := range pod.Spec.Volumes { + if volume.Name == pvb.Spec.Volume && 
volume.PersistentVolumeClaim != nil {
+			pvcName = volume.PersistentVolumeClaim.ClaimName
+		}
+	}
+
+	if pvcName != "" {
+		pvcPVInfo := backup.PVMap[pod.Namespace+"/"+pvcName]
+
+		volumeInfo.PVCName = pvcPVInfo.PVCName
+		volumeInfo.PVCNamespace = pvcPVInfo.PVCNamespace
+		volumeInfo.PVName = pvcPVInfo.PV.Name
+		volumeInfo.PVInfo = volume.PVInfo{
+			ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy),
+			Labels:        pvcPVInfo.PV.Labels,
+		}
+	} else {
+		logger.Debugf("The PVB doesn't have a corresponding PVC: %s", pvb.Name)
+	}
+
+	tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo)
+}
+
+	return tmpVolumeInfos, errors
+}
+
+// generateVolumeInfoFromDataUpload generate VolumeInfo for DataUpload.
+func generateVolumeInfoFromDataUpload(backup *pkgbackup.Request, csiVolumesnapshotClasses []snapshotv1api.VolumeSnapshotClass,
+	crClient kbclient.Client) ([]volume.VolumeInfo, []error) {
+	tmpVolumeInfos := make([]volume.VolumeInfo, 0)
+	errors := make([]error, 0)
+
+	for _, operation := range *backup.GetItemOperationsList() {
+		if operation.Spec.ResourceIdentifier.GroupResource.String() == kuberesource.PersistentVolumeClaims.String() {
+			var duIdentifier velero.ResourceIdentifier
+
+			for _, identifier := range operation.Spec.PostOperationItems {
+				if identifier.GroupResource.String() == "datauploads.velero.io" {
+					duIdentifier = identifier
+				}
+			}
+			if duIdentifier.Empty() {
+				errors = append(errors, fmt.Errorf("cannot find DataUpload for PVC %s/%s backup async operation",
+					operation.Spec.ResourceIdentifier.Namespace, operation.Spec.ResourceIdentifier.Name))
+				continue
+			}
+
+			dataUpload := new(velerov2alpha1.DataUpload)
+			err := crClient.Get(
+				context.TODO(),
+				kbclient.ObjectKey{
+					Namespace: duIdentifier.Namespace,
+					Name:      duIdentifier.Name},
+				dataUpload,
+			)
+			if err != nil {
+				errors = append(errors, fmt.Errorf("fail to get DataUpload for operation %s: %s", operation.Spec.OperationID, err.Error()))
+				continue
+			}
+
+			volumeSnapshotClass := 
(*snapshotv1api.VolumeSnapshotClass)(nil)
+			for index := range csiVolumesnapshotClasses {
+				if csiVolumesnapshotClasses[index].Name == dataUpload.Spec.CSISnapshot.SnapshotClass {
+					volumeSnapshotClass = &csiVolumesnapshotClasses[index]
+				}
+			}
+			if volumeSnapshotClass == nil {
+				errors = append(errors, fmt.Errorf("fail to find VolumeSnapshotClass for operation %s", operation.Spec.OperationID))
+				continue
+			}
+
+			pvcPVInfo := backup.PVMap[operation.Spec.ResourceIdentifier.Namespace+"/"+operation.Spec.ResourceIdentifier.Name]
+
+			volumeInfo := volume.VolumeInfo{
+				BackupMethod:      volume.CSISnapshot,
+				PVCName:           pvcPVInfo.PVCName,
+				PVCNamespace:      pvcPVInfo.PVCNamespace,
+				PVName:            pvcPVInfo.PV.Name,
+				SnapshotDataMoved: true,
+				Skipped:           false,
+				CSISnapshotInfo: volume.CSISnapshotInfo{
+					Driver: volumeSnapshotClass.Driver,
+				},
+				SnapshotDataMovementInfo: volume.SnapshotDataMovementInfo{
+					DataMover:        "velero",
+					UploaderType:     "kopia",
+					RetainedSnapshot: dataUpload.Namespace + "/" + dataUpload.Spec.CSISnapshot.VolumeSnapshot,
+					SnapshotHandle:   dataUpload.Status.SnapshotID,
+				},
+				PVInfo: volume.PVInfo{
+					ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy),
+					Labels:        pvcPVInfo.PV.Labels,
+				},
+			}
+
+			tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo)
+		}
+	}
+
+	return tmpVolumeInfos, errors
+}
diff --git a/pkg/controller/backup_controller_test.go b/pkg/controller/backup_controller_test.go
index df2e22a22a..cf1c515acd 100644
--- a/pkg/controller/backup_controller_test.go
+++ b/pkg/controller/backup_controller_test.go
@@ -29,13 +29,12 @@ import (
 	"github.com/google/go-cmp/cmp"
 	snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
-	snapshotfake "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/fake"
-	snapshotinformers "github.com/kubernetes-csi/external-snapshotter/client/v4/informers/externalversions"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + corev1api "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -45,7 +44,9 @@ import ( ctrl "sigs.k8s.io/controller-runtime" kbclient "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/vmware-tanzu/velero/pkg/backup" kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube" + "github.com/vmware-tanzu/velero/pkg/volume" fakeClient "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -1062,7 +1063,7 @@ func TestProcessBackupCompletions(t *testing.T) { CSIVolumeSnapshotsCompleted: 0, }, }, - volumeSnapshot: builder.ForVolumeSnapshot("velero", "testVS").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).Result(), + volumeSnapshot: builder.ForVolumeSnapshot("velero", "testVS").VolumeSnapshotClass("testClass").Status().BoundVolumeSnapshotContentName("testVSC").RestoreSize("10G").SourcePVC("testPVC").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).Result(), }, { name: "backup with snapshot data movement set to false when CSI feature is enabled", @@ -1103,7 +1104,7 @@ func TestProcessBackupCompletions(t *testing.T) { CSIVolumeSnapshotsCompleted: 0, }, }, - volumeSnapshot: builder.ForVolumeSnapshot("velero", "testVS").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).Result(), + volumeSnapshot: builder.ForVolumeSnapshot("velero", "testVS").VolumeSnapshotClass("testClass").Status().BoundVolumeSnapshotContentName("testVSC").RestoreSize("10G").SourcePVC("testPVC").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).Result(), }, { name: "backup with snapshot data movement not set when CSI feature is enabled", @@ -1143,7 +1144,7 @@ func TestProcessBackupCompletions(t *testing.T) { CSIVolumeSnapshotsCompleted: 0, }, }, - volumeSnapshot: builder.ForVolumeSnapshot("velero", 
"testVS").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).Result(), + volumeSnapshot: builder.ForVolumeSnapshot("velero", "testVS").VolumeSnapshotClass("testClass").Status().BoundVolumeSnapshotContentName("testVSC").RestoreSize("10G").SourcePVC("testPVC").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).Result(), }, { name: "backup with snapshot data movement set to true and defaultSnapshotMoveData set to false", @@ -1184,7 +1185,7 @@ func TestProcessBackupCompletions(t *testing.T) { CSIVolumeSnapshotsCompleted: 0, }, }, - volumeSnapshot: builder.ForVolumeSnapshot("velero", "testVS").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).Result(), + volumeSnapshot: builder.ForVolumeSnapshot("velero", "testVS").VolumeSnapshotClass("testClass").Status().BoundVolumeSnapshotContentName("testVSC").RestoreSize("10G").SourcePVC("testPVC").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).Result(), }, { name: "backup with snapshot data movement set to false and defaultSnapshotMoveData set to true", @@ -1225,7 +1226,7 @@ func TestProcessBackupCompletions(t *testing.T) { CSIVolumeSnapshotsCompleted: 0, }, }, - volumeSnapshot: builder.ForVolumeSnapshot("velero", "testVS").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).Result(), + volumeSnapshot: builder.ForVolumeSnapshot("velero", "testVS").VolumeSnapshotClass("testClass").Status().BoundVolumeSnapshotContentName("testVSC").RestoreSize("10G").SourcePVC("testPVC").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).Result(), }, { name: "backup with snapshot data movement not set and defaultSnapshotMoveData set to true", @@ -1266,35 +1267,43 @@ func TestProcessBackupCompletions(t *testing.T) { CSIVolumeSnapshotsCompleted: 0, }, }, - volumeSnapshot: builder.ForVolumeSnapshot("velero", "testVS").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).Result(), + volumeSnapshot: 
builder.ForVolumeSnapshot("velero", "testVS").VolumeSnapshotClass("testClass").Status().BoundVolumeSnapshotContentName("testVSC").RestoreSize("10G").SourcePVC("testPVC").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).Result(), }, } + snapshotHandle := "testSnapshotID" + for _, test := range tests { t.Run(test.name, func(t *testing.T) { formatFlag := logging.FormatText var ( - logger = logging.DefaultLogger(logrus.DebugLevel, formatFlag) - pluginManager = new(pluginmocks.Manager) - backupStore = new(persistencemocks.BackupStore) - backupper = new(fakeBackupper) - snapshotClient = snapshotfake.NewSimpleClientset() - sharedInformer = snapshotinformers.NewSharedInformerFactory(snapshotClient, 0) - snapshotLister = sharedInformer.Snapshot().V1().VolumeSnapshots().Lister() + logger = logging.DefaultLogger(logrus.DebugLevel, formatFlag) + pluginManager = new(pluginmocks.Manager) + backupStore = new(persistencemocks.BackupStore) + backupper = new(fakeBackupper) + fakeGlobalClient = velerotest.NewFakeControllerRuntimeClient(t) ) var fakeClient kbclient.Client // add the test's backup storage location if it's different than the default if test.backupLocation != nil && test.backupLocation != defaultBackupLocation { - fakeClient = velerotest.NewFakeControllerRuntimeClient(t, test.backupLocation) + fakeClient = velerotest.NewFakeControllerRuntimeClient(t, test.backupLocation, + builder.ForVolumeSnapshotClass("testClass").Driver("testDriver").Result(), + builder.ForVolumeSnapshotContent("testVSC").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).VolumeSnapshotClassName("testClass").Status(&snapshotv1api.VolumeSnapshotContentStatus{ + SnapshotHandle: &snapshotHandle, + }).Result(), + ) } else { - fakeClient = velerotest.NewFakeControllerRuntimeClient(t) + fakeClient = velerotest.NewFakeControllerRuntimeClient(t, + builder.ForVolumeSnapshotClass("testClass").Driver("testDriver").Result(), + 
builder.ForVolumeSnapshotContent("testVSC").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).VolumeSnapshotClassName("testClass").Status(&snapshotv1api.VolumeSnapshotContentStatus{ + SnapshotHandle: &snapshotHandle, + }).Result(), + ) } if test.volumeSnapshot != nil { - snapshotClient.SnapshotV1().VolumeSnapshots(test.volumeSnapshot.Namespace).Create(context.Background(), test.volumeSnapshot, metav1.CreateOptions{}) - sharedInformer.Snapshot().V1().VolumeSnapshots().Informer().GetStore().Add(test.volumeSnapshot) - sharedInformer.WaitForCacheSync(make(chan struct{})) + require.NoError(t, fakeGlobalClient.Create(context.TODO(), test.volumeSnapshot)) } apiServer := velerotest.NewAPIServer(t) @@ -1328,8 +1337,7 @@ func TestProcessBackupCompletions(t *testing.T) { backupStoreGetter: NewFakeSingleObjectBackupStoreGetter(backupStore), backupper: backupper, formatFlag: formatFlag, - volumeSnapshotClient: snapshotClient, - volumeSnapshotLister: snapshotLister, + globalCRClient: fakeGlobalClient, } pluginManager.On("GetBackupItemActionsV2").Return(nil, nil) @@ -1731,3 +1739,128 @@ func TestPatchResourceWorksWithStatus(t *testing.T) { } } +func TestGenerateVolumeInfo(t *testing.T) {} + +func TestGenerateVolumeInfoFromPVB(t *testing.T) { + tests := []struct { + name string + pvb *velerov1api.PodVolumeBackup + pod *corev1api.Pod + pvMap map[string]backup.PvcPvInfo + expectedVolumeInfos []volume.VolumeInfo + expectedErrs []error + }{ + { + name: "cannot find PVB's pod, should fail", + pvb: builder.ForPodVolumeBackup("velero", "testPVB").PodName("testPod").PodNamespace("velero").Result(), + expectedVolumeInfos: []volume.VolumeInfo{}, + expectedErrs: []error{fmt.Errorf("fail to get pod for PVB velero/testPVB: pods \"testPod\" not found")}, + }, + { + name: "PVB doesn't have a related PVC", + pvb: builder.ForPodVolumeBackup("velero", "testPVB").PodName("testPod").PodNamespace("velero").Result(), + pod: builder.ForPod("velero", 
"testPod").Containers(&corev1api.Container{ + Name: "test", + VolumeMounts: []corev1api.VolumeMount{ + { + Name: "testVolume", + MountPath: "/data", + }, + }, + }).Volumes( + &corev1api.Volume{ + Name: "", + VolumeSource: corev1api.VolumeSource{ + HostPath: &corev1api.HostPathVolumeSource{}, + }, + }, + ).Result(), + expectedVolumeInfos: []volume.VolumeInfo{ + { + PVCName: "", + PVCNamespace: "", + PVName: "", + BackupMethod: volume.PodVolumeBackup, + PVBInfo: volume.PodVolumeBackupInfo{ + PodName: "testPod", + PodNamespace: "velero", + }, + }, + }, + expectedErrs: []error{}, + }, + { + name: "PVB's volume has a PVC", + pvMap: map[string]backup.PvcPvInfo{ + "velero/testPVC": { + PVCName: "testPVC", + PVCNamespace: "velero", + PV: corev1api.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPV", + Labels: map[string]string{"a": "b"}, + }, + Spec: corev1api.PersistentVolumeSpec{ + PersistentVolumeReclaimPolicy: corev1api.PersistentVolumeReclaimDelete, + }, + }, + }, + }, + pvb: builder.ForPodVolumeBackup("velero", "testPVB").PodName("testPod").PodNamespace("velero").Result(), + pod: builder.ForPod("velero", "testPod").Containers(&corev1api.Container{ + Name: "test", + VolumeMounts: []corev1api.VolumeMount{ + { + Name: "testVolume", + MountPath: "/data", + }, + }, + }).Volumes( + &corev1api.Volume{ + Name: "", + VolumeSource: corev1api.VolumeSource{ + PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{ + ClaimName: "testPVC", + }, + }, + }, + ).Result(), + expectedVolumeInfos: []volume.VolumeInfo{ + { + PVCName: "testPVC", + PVCNamespace: "velero", + PVName: "testPV", + BackupMethod: volume.PodVolumeBackup, + PVBInfo: volume.PodVolumeBackupInfo{ + PodName: "testPod", + PodNamespace: "velero", + }, + PVInfo: volume.PVInfo{ + ReclaimPolicy: string(corev1api.PersistentVolumeReclaimDelete), + Labels: map[string]string{"a": "b"}, + }, + }, + }, + expectedErrs: []error{}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t 
*testing.T) { + crClient := velerotest.NewFakeControllerRuntimeClient(t) + logger := logging.DefaultLogger(logrus.DebugLevel, logging.FormatJSON) + request := new(pkgbackup.Request) + request.PodVolumeBackups = append(request.PodVolumeBackups, tc.pvb) + if tc.pvMap != nil { + request.PVMap = tc.pvMap + } + if tc.pod != nil { + require.NoError(t, crClient.Create(context.TODO(), tc.pod)) + } + + volumeInfos, errors := generateVolumeInfoFromPVB(*request, crClient, logger) + require.Equal(t, tc.expectedErrs, errors) + require.Equal(t, tc.expectedVolumeInfos, volumeInfos) + }) + } +} diff --git a/pkg/controller/backup_finalizer_controller.go b/pkg/controller/backup_finalizer_controller.go index eb99f6ee53..ea9c0364b2 100644 --- a/pkg/controller/backup_finalizer_controller.go +++ b/pkg/controller/backup_finalizer_controller.go @@ -29,8 +29,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime" kbclient "sigs.k8s.io/controller-runtime/pkg/client" - snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1" - velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" pkgbackup "github.com/vmware-tanzu/velero/pkg/backup" "github.com/vmware-tanzu/velero/pkg/metrics" @@ -42,21 +40,21 @@ import ( // backupFinalizerReconciler reconciles a Backup object type backupFinalizerReconciler struct { - client kbclient.Client - volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister - clock clocks.WithTickerAndDelayedExecution - backupper pkgbackup.Backupper - newPluginManager func(logrus.FieldLogger) clientmgmt.Manager - backupTracker BackupTracker - metrics *metrics.ServerMetrics - backupStoreGetter persistence.ObjectBackupStoreGetter - log logrus.FieldLogger + client kbclient.Client + globalCRClient kbclient.Client + clock clocks.WithTickerAndDelayedExecution + backupper pkgbackup.Backupper + newPluginManager func(logrus.FieldLogger) clientmgmt.Manager + backupTracker BackupTracker + metrics *metrics.ServerMetrics + backupStoreGetter 
persistence.ObjectBackupStoreGetter + log logrus.FieldLogger } // NewBackupFinalizerReconciler initializes and returns backupFinalizerReconciler struct. func NewBackupFinalizerReconciler( client kbclient.Client, - volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister, + globalCRClient kbclient.Client, clock clocks.WithTickerAndDelayedExecution, backupper pkgbackup.Backupper, newPluginManager func(logrus.FieldLogger) clientmgmt.Manager, @@ -67,6 +65,7 @@ func NewBackupFinalizerReconciler( ) *backupFinalizerReconciler { return &backupFinalizerReconciler{ client: client, + globalCRClient: globalCRClient, clock: clock, backupper: backupper, newPluginManager: newPluginManager, @@ -191,7 +190,7 @@ func (r *backupFinalizerReconciler) Reconcile(ctx context.Context, req ctrl.Requ backup.Status.CompletionTimestamp = &metav1.Time{Time: r.clock.Now()} recordBackupMetrics(log, backup, outBackupFile, r.metrics, true) - pkgbackup.UpdateBackupCSISnapshotsStatus(r.client, r.volumeSnapshotLister, backup, log) + pkgbackup.UpdateBackupCSISnapshotsStatus(r.client, r.globalCRClient, backup, log) // update backup metadata in object store backupJSON := new(bytes.Buffer) if err := encode.To(backup, "json", backupJSON); err != nil { diff --git a/pkg/controller/backup_finalizer_controller_test.go b/pkg/controller/backup_finalizer_controller_test.go index f759d03187..74f6da57c5 100644 --- a/pkg/controller/backup_finalizer_controller_test.go +++ b/pkg/controller/backup_finalizer_controller_test.go @@ -23,7 +23,6 @@ import ( "testing" "time" - snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -44,14 +43,13 @@ import ( "github.com/vmware-tanzu/velero/pkg/plugin/framework" "github.com/vmware-tanzu/velero/pkg/plugin/velero" velerotest "github.com/vmware-tanzu/velero/pkg/test" - velerotestmocks 
"github.com/vmware-tanzu/velero/pkg/test/mocks" ) -func mockBackupFinalizerReconciler(fakeClient kbclient.Client, fakeVolumeSnapshotLister snapshotv1listers.VolumeSnapshotLister, fakeClock *testclocks.FakeClock) (*backupFinalizerReconciler, *fakeBackupper) { +func mockBackupFinalizerReconciler(fakeClient kbclient.Client, fakeGlobalClient kbclient.Client, fakeClock *testclocks.FakeClock) (*backupFinalizerReconciler, *fakeBackupper) { backupper := new(fakeBackupper) return NewBackupFinalizerReconciler( fakeClient, - fakeVolumeSnapshotLister, + fakeGlobalClient, fakeClock, backupper, func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }, @@ -164,9 +162,9 @@ func TestBackupFinalizerReconcile(t *testing.T) { fakeClient := velerotest.NewFakeControllerRuntimeClient(t, initObjs...) - fakeVolumeSnapshotLister := velerotestmocks.NewVolumeSnapshotLister(t) + fakeGlobalClient := velerotest.NewFakeControllerRuntimeClient(t, initObjs...) - reconciler, backupper := mockBackupFinalizerReconciler(fakeClient, fakeVolumeSnapshotLister, fakeClock) + reconciler, backupper := mockBackupFinalizerReconciler(fakeClient, fakeGlobalClient, fakeClock) pluginManager.On("CleanupClients").Return(nil) backupStore.On("GetBackupItemOperations", test.backup.Name).Return(test.backupOperations, nil) backupStore.On("GetBackupContents", mock.Anything).Return(io.NopCloser(bytes.NewReader([]byte("hello world"))), nil) diff --git a/pkg/volume/volume_info_common.go b/pkg/volume/volume_info_common.go index cfca31df9b..dda43822a1 100644 --- a/pkg/volume/volume_info_common.go +++ b/pkg/volume/volume_info_common.go @@ -69,6 +69,7 @@ type VolumeInfo struct { SnapshotDataMovementInfo SnapshotDataMovementInfo `json:"snapshotDataMovementInfo,omitempty"` NativeSnapshotInfo NativeSnapshotInfo `json:"nativeSnapshotInfo,omitempty"` PVBInfo PodVolumeBackupInfo `json:"pvbInfo,omitempty"` + PVInfo PVInfo `json:"pvInfo,omitempty"` } // CSISnapshotInfo is used for displaying the CSI snapshot status @@ -139,9 
+140,22 @@ type PodVolumeBackupInfo struct { // https://github.com/kubernetes/kubernetes/blob/e4b74dd12fa8cb63c174091d5536a10b8ec19d34/pkg/apis/core/types.go#L48 VolumeName string `json:"volumeName"` - // The Pod name mounting this PVC. The format should be /. + // The Pod name mounting this PVC. PodName string `json:"podName"` + // The Pod namespace + PodNamespace string `json:"podNamespace"` + // The PVB-taken k8s node's name. NodeName string `json:"nodeName"` } + +// PVInfo is used to store some PV information modified after creation. +// Those information are lost after PV recreation. +type PVInfo struct { + // ReclaimPolicy of PV. It could be different from the referenced StorageClass. + ReclaimPolicy string `json:"reclaimPolicy"` + + // The PV's labels should be kept after recreation. + Labels map[string]string `json:"labels"` +}