From 532d9d6c390374d82861010473bf1cc3c11fe5cb Mon Sep 17 00:00:00 2001
From: nfyxhan
Date: Fri, 12 Jul 2024 14:04:27 +0800
Subject: [PATCH] fix: Resource status restore doesn't work for resources without status subresource #7993

Signed-off-by: nfyxhan
---
 go.mod                 |  1 +
 go.sum                 |  2 ++
 pkg/restore/restore.go | 80 +++++++++++++++++++++++++++++++++++++++---
 3 files changed, 83 insertions(+), 4 deletions(-)

diff --git a/go.mod b/go.mod
index 03c275c13d..9343a6f23e 100644
--- a/go.mod
+++ b/go.mod
@@ -19,6 +19,7 @@ require (
 	github.com/bombsimon/logrusr/v3 v3.0.0
 	github.com/evanphx/json-patch/v5 v5.8.0
 	github.com/fatih/color v1.16.0
+	github.com/gertd/go-pluralize v0.2.1
 	github.com/gobwas/glob v0.2.3
 	github.com/golang/protobuf v1.5.4
 	github.com/google/go-cmp v0.6.0
diff --git a/go.sum b/go.sum
index 84f4f45a3d..b288a325dc 100644
--- a/go.sum
+++ b/go.sum
@@ -240,6 +240,8 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
 github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/gertd/go-pluralize v0.2.1 h1:M3uASbVjMnTsPb0PNqg+E/24Vwigyo/tvyMTtAlLgiA=
+github.com/gertd/go-pluralize v0.2.1/go.mod h1:rbYaKDbsXxmRfr8uygAEKhOWsjyrrqrkHVpZvoOp8zk=
 github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go
index 63c26538ba..3008c31014 100644
--- a/pkg/restore/restore.go
+++ b/pkg/restore/restore.go
@@ -29,11 +29,13 @@ import (
 	"sync"
 	"time"
 
+	"github.com/gertd/go-pluralize"
 	"github.com/google/uuid"
 	snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	v1 "k8s.io/api/core/v1"
+	apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	"k8s.io/apimachinery/pkg/api/equality"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -1079,6 +1081,69 @@ func (ctx *restoreContext) getResource(groupResource schema.GroupResource, obj *
 	return u, nil
 }
 
+var pluralizeClient = pluralize.NewClient()
+
+// getSubResourceStatusEnabled reports whether the CRD backing obj defines a
+// status subresource for obj's API version. It defaults to true (keep the
+// existing behavior of resetting status) whenever the answer cannot be
+// determined.
+func (ctx *restoreContext) getSubResourceStatusEnabled(obj *unstructured.Unstructured, groupResource schema.GroupResource) (bool, error) {
+	result := true
+	gvk := obj.GetObjectKind().GroupVersionKind()
+	// Resources in the core group are never CRD-backed; keep the default.
+	if gvk.Group == "" {
+		return result, nil
+	}
+	crdGVR := schema.GroupVersionResource{
+		Group:    "apiextensions.k8s.io",
+		Resource: "customresourcedefinitions",
+		Version:  "v1",
+	}
+	crdUnstructured := &unstructured.Unstructured{}
+	crdUnstructured.SetAPIVersion(crdGVR.GroupVersion().String())
+	crdUnstructured.SetKind("CustomResourceDefinition")
+	crdGR := crdGVR.GroupResource()
+	resourceClient, err := ctx.getResourceClient(crdGR, crdUnstructured, "")
+	if err != nil {
+		return result, err
+	}
+
+	name := fmt.Sprintf("%s.%s", pluralizeClient.Plural(strings.ToLower(gvk.Kind)), gvk.Group)
+	available, err := ctx.crdAvailable(name, resourceClient)
+	if err != nil {
+		ctx.log.Errorf("Error checking availability of CRD %s: %v", name, err)
+		return result, err
+	}
+	if !available {
+		return result, nil
+	}
+
+	if !ctx.disableInformerCache {
+		crdUnstructured, err = ctx.getResource(crdGR, crdUnstructured, "", name)
+	} else {
+		crdUnstructured, err = resourceClient.Get(name, metav1.GetOptions{})
+	}
+	if err != nil {
+		ctx.log.Errorf("Error getting CRD %s (disableInformerCache=%v): %v", name, ctx.disableInformerCache, err)
+		return result, err
+	}
+	data, err := crdUnstructured.MarshalJSON()
+	if err != nil {
+		return result, err
+	}
+	crd := &apiextv1.CustomResourceDefinition{}
+	if err := json.Unmarshal(data, crd); err != nil {
+		return result, err
+	}
+	for _, v := range crd.Spec.Versions {
+		if v.Name != gvk.Version {
+			continue
+		}
+		return v.Subresources != nil && v.Subresources.Status != nil, nil
+	}
+	return result, nil
+}
+
 func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupResource schema.GroupResource, namespace string) (results.Result, results.Result, bool) {
 	warnings, errs := results.Result{}, results.Result{}
 	// itemExists bool is used to determine whether to include this item in the "wait for additional items" list
@@ -1294,9 +1359,15 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 		}
 	}
 
+	statusEnabled, err := ctx.getSubResourceStatusEnabled(obj, groupResource)
+	if err != nil {
+		// Fall back to the previous behavior (resetting status) when the
+		// status subresource check fails; the restore itself can proceed.
+		ctx.log.Warnf("Error checking status subresource of %s: %v", kube.NamespaceAndName(obj), err)
+	}
 	objStatus, statusFieldExists, statusFieldErr := unstructured.NestedFieldCopy(obj.Object, "status")
 	// Clear out non-core metadata fields and status.
-	if obj, err = resetMetadataAndStatus(obj); err != nil {
+	if obj, err = resetMetadataAndStatus(obj, statusEnabled); err != nil {
 		errs.Add(namespace, err)
 		return warnings, errs, itemExists
 	}
@@ -1535,7 +1606,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 		itemStatus.itemExists = itemExists
 		ctx.restoredItems[itemKey] = itemStatus
 		// Remove insubstantial metadata.
-		fromCluster, err = resetMetadataAndStatus(fromCluster)
+		fromCluster, err = resetMetadataAndStatus(fromCluster, statusEnabled)
 		if err != nil {
 			ctx.log.Infof("Error trying to reset metadata for %s: %v", kube.NamespaceAndName(obj), err)
 			warnings.Add(namespace, err)
@@ -2080,12 +2151,17 @@ func resetStatus(obj *unstructured.Unstructured) {
 	unstructured.RemoveNestedField(obj.UnstructuredContent(), "status")
 }
 
-func resetMetadataAndStatus(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
+// resetMetadataAndStatus clears non-core metadata fields and, when the
+// resource has a status subresource, its status field. Resources without a
+// status subresource keep status so it is restored with the object itself.
+func resetMetadataAndStatus(obj *unstructured.Unstructured, statusEnabled bool) (*unstructured.Unstructured, error) {
 	_, err := resetMetadata(obj)
 	if err != nil {
 		return nil, err
 	}
-	resetStatus(obj)
+	if statusEnabled {
+		resetStatus(obj)
+	}
 	return obj, nil
 }
 
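
Reviewer note (not part of the patch): the fix hinges on resetMetadataAndStatus() keeping the status field when the resource has no status subresource, and clearing it otherwise. Below is a minimal test sketch of that contract; it assumes the testify helpers already used elsewhere in Velero's test suite, and the "Widget" kind, "example.com" group, and field values are hypothetical.

package restore

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func TestResetMetadataAndStatusHonorsStatusSubresource(t *testing.T) {
	// A hypothetical CRD-backed object carrying a populated status.
	newObj := func() *unstructured.Unstructured {
		return &unstructured.Unstructured{Object: map[string]interface{}{
			"apiVersion": "example.com/v1",
			"kind":       "Widget",
			"metadata": map[string]interface{}{
				"name":            "widget-1",
				"resourceVersion": "42",
			},
			"status": map[string]interface{}{"phase": "Ready"},
		}}
	}

	// statusEnabled=true: the CRD defines a status subresource, so status is
	// cleared here and restored later through the status endpoint.
	out, err := resetMetadataAndStatus(newObj(), true)
	require.NoError(t, err)
	_, found, err := unstructured.NestedFieldCopy(out.Object, "status")
	require.NoError(t, err)
	assert.False(t, found, "status should be cleared when the subresource exists")

	// statusEnabled=false: there is no status endpoint to write to, so status
	// must stay in the object sent to Create().
	out, err = resetMetadataAndStatus(newObj(), false)
	require.NoError(t, err)
	_, found, err = unstructured.NestedFieldCopy(out.Object, "status")
	require.NoError(t, err)
	assert.True(t, found, "status should be kept when there is no status subresource")
}

One caveat worth flagging in review: the CRD name is derived by English-pluralizing the lowercased kind, while a CRD's actual plural is declared in its spec and can be irregular. When the guess misses, crdAvailable() returns an error and the code falls back to the old reset-always behavior, so the failure mode is a warning plus the pre-patch semantics rather than a broken restore.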