diff --git a/pkg/gitops-engine/pkg/utils/kube/convert.go b/pkg/gitops-engine/pkg/utils/kube/convert.go new file mode 100644 index 0000000..826c384 --- /dev/null +++ b/pkg/gitops-engine/pkg/utils/kube/convert.go @@ -0,0 +1,26 @@ +package kube + +import ( + "github.com/argoproj/gitops-engine/pkg/utils/kube/scheme" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func convertToVersionWithScheme(obj *unstructured.Unstructured, group string, version string) (*unstructured.Unstructured, error) { + s := scheme.Scheme + object, err := s.ConvertToVersion(obj, runtime.InternalGroupVersioner) + if err != nil { + return nil, err + } + unmarshalledObj, err := s.ConvertToVersion(object, schema.GroupVersion{Group: group, Version: version}) + if err != nil { + return nil, err + } + unstrBody, err := runtime.DefaultUnstructuredConverter.ToUnstructured(unmarshalledObj) + if err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: unstrBody}, nil +} diff --git a/pkg/gitops-engine/pkg/utils/kube/ctl.go b/pkg/gitops-engine/pkg/utils/kube/ctl.go new file mode 100644 index 0000000..3f494b3 --- /dev/null +++ b/pkg/gitops-engine/pkg/utils/kube/ctl.go @@ -0,0 +1,350 @@ +package kube + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/go-logr/logr" + "golang.org/x/sync/errgroup" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/managedfields" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + "k8s.io/kube-openapi/pkg/util/proto" + "k8s.io/kubectl/pkg/util/openapi" + + utils "github.com/argoproj/gitops-engine/pkg/utils/io" + "github.com/argoproj/gitops-engine/pkg/utils/tracing" +) + +type CleanupFunc func() + +type OnKubectlRunFunc func(command string) (CleanupFunc, error) + +type Kubectl interface { + ManageResources(config *rest.Config, openAPISchema openapi.Resources) (ResourceOperations, func(), error) + LoadOpenAPISchema(config *rest.Config) (openapi.Resources, *managedfields.GvkParser, error) + ConvertToVersion(obj *unstructured.Unstructured, group, version string) (*unstructured.Unstructured, error) + DeleteResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, deleteOptions metav1.DeleteOptions) error + GetResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string) (*unstructured.Unstructured, error) + CreateResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, obj *unstructured.Unstructured, createOptions metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) + PatchResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, patchType types.PatchType, patchBytes []byte, subresources ...string) (*unstructured.Unstructured, error) + GetAPIResources(config *rest.Config, preferred bool, resourceFilter ResourceFilter) ([]APIResourceInfo, error) + GetServerVersion(config *rest.Config) (string, error) + NewDynamicClient(config *rest.Config) (dynamic.Interface, error) + SetOnKubectlRun(onKubectlRun OnKubectlRunFunc) +} + +type KubectlCmd struct { + Log logr.Logger + Tracer tracing.Tracer + OnKubectlRun OnKubectlRunFunc +} + +type APIResourceInfo struct { + 
GroupKind schema.GroupKind + Meta metav1.APIResource + GroupVersionResource schema.GroupVersionResource +} + +type filterFunc func(apiResource *metav1.APIResource) bool + +func (k *KubectlCmd) filterAPIResources(config *rest.Config, preferred bool, resourceFilter ResourceFilter, filter filterFunc) ([]APIResourceInfo, error) { + disco, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, err + } + + var serverResources []*metav1.APIResourceList + if preferred { + serverResources, err = disco.ServerPreferredResources() + } else { + _, serverResources, err = disco.ServerGroupsAndResources() + } + + if err != nil { + if len(serverResources) == 0 { + return nil, err + } + k.Log.Error(err, "Partial success when performing preferred resource discovery") + } + apiResIfs := make([]APIResourceInfo, 0) + for _, apiResourcesList := range serverResources { + gv, err := schema.ParseGroupVersion(apiResourcesList.GroupVersion) + if err != nil { + gv = schema.GroupVersion{} + } + for _, apiResource := range apiResourcesList.APIResources { + + if resourceFilter.IsExcludedResource(gv.Group, apiResource.Kind, config.Host) { + continue + } + + if filter(&apiResource) { + resource := ToGroupVersionResource(apiResourcesList.GroupVersion, &apiResource) + gv, err := schema.ParseGroupVersion(apiResourcesList.GroupVersion) + if err != nil { + return nil, err + } + apiResIf := APIResourceInfo{ + GroupKind: schema.GroupKind{Group: gv.Group, Kind: apiResource.Kind}, + Meta: apiResource, + GroupVersionResource: resource, + } + apiResIfs = append(apiResIfs, apiResIf) + } + } + } + return apiResIfs, nil +} + +// isSupportedVerb returns whether or not a APIResource supports a specific verb. +// The verb will be matched case-insensitive. +func isSupportedVerb(apiResource *metav1.APIResource, verb string) bool { + if verb == "" || verb == "*" { + return true + } + for _, v := range apiResource.Verbs { + if strings.EqualFold(v, verb) { + return true + } + } + return false +} + +// LoadOpenAPISchema will load all existing resource schemas from the cluster +// and return: +// - openapi.Resources: used for getting the proto.Schema from a GVK +// - managedfields.GvkParser: used for building a ParseableType to be used in +// structured-merge-diffs +func (k *KubectlCmd) LoadOpenAPISchema(config *rest.Config) (openapi.Resources, *managedfields.GvkParser, error) { + disco, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, nil, err + } + + oapiGetter := openapi.NewOpenAPIGetter(disco) + oapiResources, err := openapi.NewOpenAPIParser(oapiGetter).Parse() + if err != nil { + return nil, nil, fmt.Errorf("error getting openapi resources: %s", err) + } + gvkParser, err := k.newGVKParser(oapiGetter) + if err != nil { + return oapiResources, nil, fmt.Errorf("error getting gvk parser: %s", err) + } + return oapiResources, gvkParser, nil +} + +func (k *KubectlCmd) newGVKParser(oapiGetter discovery.OpenAPISchemaInterface) (*managedfields.GvkParser, error) { + doc, err := oapiGetter.OpenAPISchema() + if err != nil { + return nil, fmt.Errorf("error getting openapi schema: %s", err) + } + models, err := proto.NewOpenAPIData(doc) + if err != nil { + return nil, fmt.Errorf("error getting openapi data: %s", err) + } + var taintedGVKs []schema.GroupVersionKind + models, taintedGVKs = newUniqueModels(models) + if len(taintedGVKs) > 0 { + k.Log.Info("Duplicate GVKs detected in OpenAPI schema. 
This could cause inaccurate diffs.", "gvks", taintedGVKs) + } + gvkParser, err := managedfields.NewGVKParser(models, false) + if err != nil { + return nil, err + } + return gvkParser, nil +} + +func (k *KubectlCmd) GetAPIResources(config *rest.Config, preferred bool, resourceFilter ResourceFilter) ([]APIResourceInfo, error) { + span := k.Tracer.StartSpan("GetAPIResources") + defer span.Finish() + apiResIfs, err := k.filterAPIResources(config, preferred, resourceFilter, func(apiResource *metav1.APIResource) bool { + return isSupportedVerb(apiResource, listVerb) && isSupportedVerb(apiResource, watchVerb) + }) + if err != nil { + return nil, err + } + return apiResIfs, err +} + +// GetResource returns resource +func (k *KubectlCmd) GetResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string) (*unstructured.Unstructured, error) { + span := k.Tracer.StartSpan("GetResource") + span.SetBaggageItem("kind", gvk.Kind) + span.SetBaggageItem("name", name) + defer span.Finish() + dynamicIf, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + disco, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, err + } + apiResource, err := ServerResourceForGroupVersionKind(disco, gvk, "get") + if err != nil { + return nil, err + } + resource := gvk.GroupVersion().WithResource(apiResource.Name) + resourceIf := ToResourceInterface(dynamicIf, apiResource, resource, namespace) + return resourceIf.Get(ctx, name, metav1.GetOptions{}) +} + +// CreateResource creates resource +func (k *KubectlCmd) CreateResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, obj *unstructured.Unstructured, createOptions metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) { + span := k.Tracer.StartSpan("CreateResource") + span.SetBaggageItem("kind", gvk.Kind) + span.SetBaggageItem("name", name) + defer span.Finish() + dynamicIf, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + disco, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, err + } + apiResource, err := ServerResourceForGroupVersionKind(disco, gvk, "create") + if err != nil { + return nil, err + } + resource := gvk.GroupVersion().WithResource(apiResource.Name) + resourceIf := ToResourceInterface(dynamicIf, apiResource, resource, namespace) + return resourceIf.Create(ctx, obj, createOptions, subresources...) +} + +// PatchResource patches resource +func (k *KubectlCmd) PatchResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, patchType types.PatchType, patchBytes []byte, subresources ...string) (*unstructured.Unstructured, error) { + span := k.Tracer.StartSpan("PatchResource") + span.SetBaggageItem("kind", gvk.Kind) + span.SetBaggageItem("name", name) + defer span.Finish() + dynamicIf, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + disco, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, err + } + apiResource, err := ServerResourceForGroupVersionKind(disco, gvk, "patch") + if err != nil { + return nil, err + } + resource := gvk.GroupVersion().WithResource(apiResource.Name) + resourceIf := ToResourceInterface(dynamicIf, apiResource, resource, namespace) + return resourceIf.Patch(ctx, name, patchType, patchBytes, metav1.PatchOptions{}, subresources...) 
+} + +// DeleteResource deletes resource +func (k *KubectlCmd) DeleteResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, deleteOptions metav1.DeleteOptions) error { + span := k.Tracer.StartSpan("DeleteResource") + span.SetBaggageItem("kind", gvk.Kind) + span.SetBaggageItem("name", name) + defer span.Finish() + dynamicIf, err := dynamic.NewForConfig(config) + if err != nil { + return err + } + disco, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return err + } + apiResource, err := ServerResourceForGroupVersionKind(disco, gvk, "delete") + if err != nil { + return err + } + resource := gvk.GroupVersion().WithResource(apiResource.Name) + resourceIf := ToResourceInterface(dynamicIf, apiResource, resource, namespace) + + if deleteOptions.PropagationPolicy == nil { + propagationPolicy := metav1.DeletePropagationForeground + deleteOptions = metav1.DeleteOptions{PropagationPolicy: &propagationPolicy} + } + return resourceIf.Delete(ctx, name, deleteOptions) +} + +func (k *KubectlCmd) ManageResources(config *rest.Config, openAPISchema openapi.Resources) (ResourceOperations, func(), error) { + f, err := os.CreateTemp(utils.TempDir, "") + if err != nil { + return nil, nil, fmt.Errorf("failed to generate temp file for kubeconfig: %v", err) + } + _ = f.Close() + err = WriteKubeConfig(config, "", f.Name()) + if err != nil { + utils.DeleteFile(f.Name()) + return nil, nil, fmt.Errorf("failed to write kubeconfig: %v", err) + } + fact := kubeCmdFactory(f.Name(), "", config) + cleanup := func() { + utils.DeleteFile(f.Name()) + } + return &kubectlResourceOperations{ + config: config, + fact: fact, + openAPISchema: openAPISchema, + tracer: k.Tracer, + log: k.Log, + onKubectlRun: k.OnKubectlRun, + }, cleanup, nil +} + +// ConvertToVersion converts an unstructured object into the specified group/version +func (k *KubectlCmd) ConvertToVersion(obj *unstructured.Unstructured, group string, version string) (*unstructured.Unstructured, error) { + span := k.Tracer.StartSpan("ConvertToVersion") + from := obj.GroupVersionKind().GroupVersion() + span.SetBaggageItem("from", from.String()) + span.SetBaggageItem("to", schema.GroupVersion{Group: group, Version: version}.String()) + defer span.Finish() + if from.Group == group && from.Version == version { + return obj.DeepCopy(), nil + } + return convertToVersionWithScheme(obj, group, version) +} + +func (k *KubectlCmd) GetServerVersion(config *rest.Config) (string, error) { + span := k.Tracer.StartSpan("GetServerVersion") + defer span.Finish() + client, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return "", err + } + v, err := client.ServerVersion() + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", v.Major, v.Minor), nil +} + +func (k *KubectlCmd) NewDynamicClient(config *rest.Config) (dynamic.Interface, error) { + return dynamic.NewForConfig(config) +} + +func (k *KubectlCmd) SetOnKubectlRun(onKubectlRun OnKubectlRunFunc) { + k.OnKubectlRun = onKubectlRun +} + +func RunAllAsync(count int, action func(i int) error) error { + g, ctx := errgroup.WithContext(context.Background()) +loop: + for i := 0; i < count; i++ { + index := i + g.Go(func() error { + return action(index) + }) + select { + case <-ctx.Done(): + // Something went wrong already, stop spawning tasks. 
+ break loop + default: + } + } + return g.Wait() +} diff --git a/pkg/gitops-engine/pkg/utils/kube/kube.go b/pkg/gitops-engine/pkg/utils/kube/kube.go new file mode 100644 index 0000000..f88ed17 --- /dev/null +++ b/pkg/gitops-engine/pkg/utils/kube/kube.go @@ -0,0 +1,423 @@ +// Package kube provides helper utilities common for kubernetes +package kube + +import ( + "bytes" + "context" + "fmt" + "io" + "regexp" + "strings" + "time" + + "github.com/go-logr/logr" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" + kubeyaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "sigs.k8s.io/yaml" +) + +const ( + listVerb = "list" + watchVerb = "watch" +) + +const ( + SecretKind = "Secret" + ServiceKind = "Service" + ServiceAccountKind = "ServiceAccount" + EndpointsKind = "Endpoints" + DeploymentKind = "Deployment" + ReplicaSetKind = "ReplicaSet" + StatefulSetKind = "StatefulSet" + DaemonSetKind = "DaemonSet" + IngressKind = "Ingress" + JobKind = "Job" + PersistentVolumeClaimKind = "PersistentVolumeClaim" + CustomResourceDefinitionKind = "CustomResourceDefinition" + PodKind = "Pod" + APIServiceKind = "APIService" + NamespaceKind = "Namespace" + HorizontalPodAutoscalerKind = "HorizontalPodAutoscaler" +) + +type ResourceInfoProvider interface { + IsNamespaced(gk schema.GroupKind) (bool, error) +} + +func IsNamespacedOrUnknown(provider ResourceInfoProvider, gk schema.GroupKind) bool { + namespaced, err := provider.IsNamespaced(gk) + return namespaced || err != nil +} + +type ResourceKey struct { + Group string + Kind string + Namespace string + Name string +} + +func (k *ResourceKey) String() string { + return fmt.Sprintf("%s/%s/%s/%s", k.Group, k.Kind, k.Namespace, k.Name) +} + +func (k ResourceKey) GroupKind() schema.GroupKind { + return schema.GroupKind{Group: k.Group, Kind: k.Kind} +} + +func NewResourceKey(group string, kind string, namespace string, name string) ResourceKey { + return ResourceKey{Group: group, Kind: kind, Namespace: namespace, Name: name} +} + +func GetResourceKey(obj *unstructured.Unstructured) ResourceKey { + gvk := obj.GroupVersionKind() + return NewResourceKey(gvk.Group, gvk.Kind, obj.GetNamespace(), obj.GetName()) +} + +func GetObjectRef(obj *unstructured.Unstructured) v1.ObjectReference { + return v1.ObjectReference{ + UID: obj.GetUID(), + APIVersion: obj.GetAPIVersion(), + Kind: obj.GetKind(), + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } +} + +// TestConfig tests to make sure the REST config is usable +func TestConfig(config *rest.Config) error { + kubeclientset, err := kubernetes.NewForConfig(config) + if err != nil { + return fmt.Errorf("REST config invalid: %s", err) + } + _, err = kubeclientset.ServerVersion() + if err != nil { + return fmt.Errorf("REST config invalid: %s", err) + } + return nil +} + +// ToUnstructured converts a concrete K8s API type to a un unstructured object +func ToUnstructured(obj interface{}) (*unstructured.Unstructured, error) { + uObj, err := runtime.NewTestUnstructuredConverter(equality.Semantic).ToUnstructured(obj) + if err 
!= nil { + return nil, err + } + return &unstructured.Unstructured{Object: uObj}, nil +} + +// MustToUnstructured converts a concrete K8s API type to a un unstructured object and panics if not successful +func MustToUnstructured(obj interface{}) *unstructured.Unstructured { + uObj, err := ToUnstructured(obj) + if err != nil { + panic(err) + } + return uObj +} + +// GetAppInstanceLabel returns the application instance name from labels +func GetAppInstanceLabel(un *unstructured.Unstructured, key string) string { + if labels := un.GetLabels(); labels != nil { + return labels[key] + } + return "" +} + +// UnsetLabel removes our app labels from an unstructured object +func UnsetLabel(target *unstructured.Unstructured, key string) { + if labels := target.GetLabels(); labels != nil { + if _, ok := labels[key]; ok { + delete(labels, key) + if len(labels) == 0 { + unstructured.RemoveNestedField(target.Object, "metadata", "labels") + } else { + target.SetLabels(labels) + } + } + } +} + +func ToGroupVersionResource(groupVersion string, apiResource *metav1.APIResource) schema.GroupVersionResource { + gvk := schema.FromAPIVersionAndKind(groupVersion, apiResource.Kind) + gv := gvk.GroupVersion() + return gv.WithResource(apiResource.Name) +} + +func ToResourceInterface(dynamicIf dynamic.Interface, apiResource *metav1.APIResource, resource schema.GroupVersionResource, namespace string) dynamic.ResourceInterface { + if apiResource.Namespaced { + return dynamicIf.Resource(resource).Namespace(namespace) + } + return dynamicIf.Resource(resource) +} + +func IsCRDGroupVersionKind(gvk schema.GroupVersionKind) bool { + return gvk.Kind == CustomResourceDefinitionKind && gvk.Group == "apiextensions.k8s.io" +} + +func IsCRD(obj *unstructured.Unstructured) bool { + return IsCRDGroupVersionKind(obj.GroupVersionKind()) +} + +// ServerResourceForGroupVersionKind looks up and returns the API resource from +// the server for a given GVK scheme. If verb is set to the non-empty string, +// it will return the API resource which supports the verb. There are some edge +// cases, where the same GVK is represented by more than one API. +// +// See: https://github.com/ksonnet/ksonnet/blob/master/utils/client.go +func ServerResourceForGroupVersionKind(disco discovery.DiscoveryInterface, gvk schema.GroupVersionKind, verb string) (*metav1.APIResource, error) { + // default is to return a not found for the requested resource + retErr := apierr.NewNotFound(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, "") + resources, err := disco.ServerResourcesForGroupVersion(gvk.GroupVersion().String()) + if err != nil { + return nil, err + } + for _, r := range resources.APIResources { + if r.Kind == gvk.Kind { + if isSupportedVerb(&r, verb) { + return &r, nil + } else { + // We have a match, but the API does not support the action + // that was requested. Memorize this. 
+ retErr = apierr.NewMethodNotSupported(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, verb) + } + } + } + return nil, retErr +} + +var ( + kubectlErrOutRegexp = regexp.MustCompile(`^(error: )?(error validating|error when creating|error when creating) "\S+": `) + + // See ApplyOpts::Run() + // cmdutil.AddSourceToErr(fmt.Sprintf("applying patch:\n%s\nto:\n%v\nfor:", patchBytes, info), info.Source, err) + kubectlApplyPatchErrOutRegexp = regexp.MustCompile(`(?s)^error when applying patch:.*\nfor: "\S+": `) +) + +// cleanKubectlOutput makes the error output of kubectl a little better to read +func cleanKubectlOutput(s string) string { + s = strings.TrimSpace(s) + s = kubectlErrOutRegexp.ReplaceAllString(s, "") + s = kubectlApplyPatchErrOutRegexp.ReplaceAllString(s, "") + s = strings.Replace(s, "; if you choose to ignore these errors, turn validation off with --validate=false", "", -1) + return s +} + +// WriteKubeConfig takes a rest.Config and writes it as a kubeconfig at the specified path +func WriteKubeConfig(restConfig *rest.Config, namespace, filename string) error { + kubeConfig := NewKubeConfig(restConfig, namespace) + return clientcmd.WriteToFile(*kubeConfig, filename) +} + +// NewKubeConfig converts a clientcmdapi.Config (kubeconfig) from a rest.Config +func NewKubeConfig(restConfig *rest.Config, namespace string) *clientcmdapi.Config { + var proxyUrl string + if restConfig.Proxy != nil { + if u, err := restConfig.Proxy(nil); err == nil { + proxyUrl = u.String() + } + } + return &clientcmdapi.Config{ + CurrentContext: restConfig.Host, + Contexts: map[string]*clientcmdapi.Context{ + restConfig.Host: { + Cluster: restConfig.Host, + AuthInfo: restConfig.Host, + Namespace: namespace, + }, + }, + Clusters: map[string]*clientcmdapi.Cluster{ + restConfig.Host: { + Server: restConfig.Host, + TLSServerName: restConfig.TLSClientConfig.ServerName, + InsecureSkipTLSVerify: restConfig.TLSClientConfig.Insecure, + CertificateAuthority: restConfig.TLSClientConfig.CAFile, + CertificateAuthorityData: restConfig.TLSClientConfig.CAData, + ProxyURL: proxyUrl, + }, + }, + AuthInfos: map[string]*clientcmdapi.AuthInfo{ + restConfig.Host: newAuthInfo(restConfig), + }, + } +} + +// newAuthInfo returns an AuthInfo from a rest config, detecting if the rest.Config is an +// in-cluster config and automatically setting the token path appropriately. 
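+//
+// A minimal usage sketch (hypothetical values, not part of this change): a config that
+// carries only a bearer token yields an AuthInfo with Token set, while an empty config
+// with no exec provider falls back to the well-known in-cluster token file:
+//
+//	authInfo := newAuthInfo(&rest.Config{BearerToken: "abc"}) // Token == "abc"
+//	authInfo = newAuthInfo(&rest.Config{})                    // TokenFile == "/var/run/secrets/kubernetes.io/serviceaccount/token"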
+func newAuthInfo(restConfig *rest.Config) *clientcmdapi.AuthInfo { + authInfo := clientcmdapi.AuthInfo{} + haveCredentials := false + if restConfig.TLSClientConfig.CertFile != "" { + authInfo.ClientCertificate = restConfig.TLSClientConfig.CertFile + haveCredentials = true + } + if len(restConfig.TLSClientConfig.CertData) > 0 { + authInfo.ClientCertificateData = restConfig.TLSClientConfig.CertData + haveCredentials = true + } + if restConfig.TLSClientConfig.KeyFile != "" { + authInfo.ClientKey = restConfig.TLSClientConfig.KeyFile + haveCredentials = true + } + if len(restConfig.TLSClientConfig.KeyData) > 0 { + authInfo.ClientKeyData = restConfig.TLSClientConfig.KeyData + haveCredentials = true + } + if restConfig.Username != "" { + authInfo.Username = restConfig.Username + haveCredentials = true + } + if restConfig.Password != "" { + authInfo.Password = restConfig.Password + haveCredentials = true + } + if restConfig.BearerToken != "" { + authInfo.Token = restConfig.BearerToken + haveCredentials = true + } + if restConfig.ExecProvider != nil { + authInfo.Exec = restConfig.ExecProvider + haveCredentials = true + } + if restConfig.ExecProvider == nil && !haveCredentials { + // If no credentials were set (or there was no exec provider), we assume in-cluster config. + // In-cluster configs from the go-client will no longer set bearer tokens, so we set the + // well known token path. See issue #774 + authInfo.TokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" + } + return &authInfo +} + +// SplitYAML splits a YAML file into unstructured objects. Returns list of all unstructured objects +// found in the yaml. If an error occurs, returns objects that have been parsed so far too. +func SplitYAML(yamlData []byte) ([]*unstructured.Unstructured, error) { + var objs []*unstructured.Unstructured + ymls, err := SplitYAMLToString(yamlData) + if err != nil { + return nil, err + } + for _, yml := range ymls { + u := &unstructured.Unstructured{} + if err := yaml.Unmarshal([]byte(yml), u); err != nil { + return objs, fmt.Errorf("failed to unmarshal manifest: %v", err) + } + objs = append(objs, u) + } + return objs, nil +} + +// SplitYAMLToString splits a YAML file into strings. Returns list of yamls +// found in the yaml. If an error occurs, returns objects that have been parsed so far too. +func SplitYAMLToString(yamlData []byte) ([]string, error) { + // Similar way to what kubectl does + // https://github.com/kubernetes/cli-runtime/blob/master/pkg/resource/visitor.go#L573-L600 + // Ideally k8s.io/cli-runtime/pkg/resource.Builder should be used instead of this method. + // E.g. Builder does list unpacking and flattening and this code does not. + d := kubeyaml.NewYAMLOrJSONDecoder(bytes.NewReader(yamlData), 4096) + var objs []string + for { + ext := runtime.RawExtension{} + if err := d.Decode(&ext); err != nil { + if err == io.EOF { + break + } + return objs, fmt.Errorf("failed to unmarshal manifest: %v", err) + } + ext.Raw = bytes.TrimSpace(ext.Raw) + if len(ext.Raw) == 0 || bytes.Equal(ext.Raw, []byte("null")) { + continue + } + objs = append(objs, string(ext.Raw)) + } + return objs, nil +} + +// WatchWithRetry returns channel of watch events or errors of failed to call watch API. 
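+//
+// A hedged usage sketch (client, ns, and handle are illustrative names, not part of this change):
+//
+//	events := WatchWithRetry(ctx, func() (watch.Interface, error) {
+//		return client.CoreV1().Pods(ns).Watch(ctx, metav1.ListOptions{})
+//	})
+//	for ev := range events {
+//		if ev.Error != nil {
+//			// the watch API call failed; the channel is closed after this event
+//			continue
+//		}
+//		handle(ev.Event)
+//	}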
+func WatchWithRetry(ctx context.Context, getWatch func() (watch.Interface, error)) chan struct { + *watch.Event + Error error +} { + ch := make(chan struct { + *watch.Event + Error error + }) + execute := func() (bool, error) { + w, err := getWatch() + if err != nil { + return false, err + } + defer w.Stop() + + for { + select { + case event, ok := <-w.ResultChan(): + if ok { + ch <- struct { + *watch.Event + Error error + }{Event: &event, Error: nil} + } else { + return true, nil + } + case <-ctx.Done(): + return false, nil + } + } + } + go func() { + defer close(ch) + for { + retry, err := execute() + if err != nil { + ch <- struct { + *watch.Event + Error error + }{Error: err} + } + if !retry { + return + } + time.Sleep(time.Second) + } + }() + return ch +} + +func GetDeploymentReplicas(u *unstructured.Unstructured) *int64 { + val, found, err := unstructured.NestedInt64(u.Object, "spec", "replicas") + if !found || err != nil { + return nil + } + return &val +} + +// RetryUntilSucceed keep retrying given action with specified interval until action succeed or specified context is done. +func RetryUntilSucceed(ctx context.Context, interval time.Duration, desc string, log logr.Logger, action func() error) { + pollErr := wait.PollUntilContextCancel(ctx, interval, true, func(ctx context.Context) (bool /*done*/, error) { + log.V(1).Info(fmt.Sprintf("Start %s", desc)) + err := action() + if err == nil { + log.V(1).Info(fmt.Sprintf("Completed %s", desc)) + return true, nil + } + log.V(1).Info(fmt.Sprintf("Failed to %s: %+v, retrying in %v", desc, err, interval)) + return false, nil + }) + if pollErr != nil { + // The only error that can happen here is wait.ErrWaitTimeout if ctx is done. + log.V(1).Info(fmt.Sprintf("Stop retrying %s", desc)) + } +} diff --git a/pkg/gitops-engine/pkg/utils/kube/resource_filter.go b/pkg/gitops-engine/pkg/utils/kube/resource_filter.go new file mode 100644 index 0000000..2a33731 --- /dev/null +++ b/pkg/gitops-engine/pkg/utils/kube/resource_filter.go @@ -0,0 +1,5 @@ +package kube + +type ResourceFilter interface { + IsExcludedResource(group, kind, cluster string) bool +} diff --git a/pkg/gitops-engine/pkg/utils/kube/resource_ops.go b/pkg/gitops-engine/pkg/utils/kube/resource_ops.go new file mode 100644 index 0000000..40e63ac --- /dev/null +++ b/pkg/gitops-engine/pkg/utils/kube/resource_ops.go @@ -0,0 +1,513 @@ +package kube + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "strings" + + "github.com/go-logr/logr" + "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/kubectl/pkg/cmd/auth" + "k8s.io/kubectl/pkg/cmd/create" + "k8s.io/kubectl/pkg/cmd/delete" + "k8s.io/kubectl/pkg/cmd/replace" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util/openapi" + + "github.com/argoproj/gitops-engine/pkg/diff" + "github.com/argoproj/gitops-engine/pkg/utils/io" + "github.com/argoproj/gitops-engine/pkg/utils/tracing" + + "github.com/zapier/kubechecks/pkg/kubectl/apply" +) + +// ResourceOperations provides methods to manage k8s resources +type ResourceOperations interface { + 
ApplyResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force, validate, serverSideApply bool, manager string, serverSideDiff bool) (string, error) + ReplaceResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force bool) (string, error) + CreateResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, validate bool) (string, error) + UpdateResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy) (*unstructured.Unstructured, error) +} + +type kubectlResourceOperations struct { + config *rest.Config + log logr.Logger + tracer tracing.Tracer + onKubectlRun OnKubectlRunFunc + fact cmdutil.Factory + openAPISchema openapi.Resources +} + +type commandExecutor func(f cmdutil.Factory, ioStreams genericclioptions.IOStreams, fileName string) error + +func (k *kubectlResourceOperations) runResourceCommand(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, serverSideDiff bool, executor commandExecutor) (string, error) { + manifestBytes, err := json.Marshal(obj) + if err != nil { + return "", err + } + manifestFile, err := os.CreateTemp(io.TempDir, "") + if err != nil { + return "", fmt.Errorf("Failed to generate temp file for manifest: %v", err) + } + defer io.DeleteFile(manifestFile.Name()) + if _, err = manifestFile.Write(manifestBytes); err != nil { + return "", fmt.Errorf("Failed to write manifest: %v", err) + } + if err = manifestFile.Close(); err != nil { + return "", fmt.Errorf("Failed to close manifest: %v", err) + } + + // log manifest + if k.log.V(1).Enabled() { + var obj unstructured.Unstructured + err := json.Unmarshal(manifestBytes, &obj) + if err != nil { + return "", err + } + redacted, _, err := diff.HideSecretData(&obj, nil) + if err != nil { + return "", err + } + redactedBytes, err := json.Marshal(redacted) + if err != nil { + return "", err + } + k.log.V(1).Info(string(redactedBytes)) + } + + var out []string + // rbac resouces are first applied with auth reconcile kubectl feature. + // serverSideDiff should avoid this step as the resources are not being actually + // applied but just running in dryrun mode. Also, kubectl auth reconcile doesn't + // currently support running dryrun in server mode. + if obj.GetAPIVersion() == "rbac.authorization.k8s.io/v1" && !serverSideDiff { + outReconcile, err := k.rbacReconcile(ctx, obj, manifestFile.Name(), dryRunStrategy) + if err != nil { + return "", fmt.Errorf("error running rbacReconcile: %s", err) + } + out = append(out, outReconcile) + // We still want to fallthrough and run `kubectl apply` in order set the + // last-applied-configuration annotation in the object. + } + + // Run kubectl apply + ioStreams := genericclioptions.IOStreams{ + In: &bytes.Buffer{}, + Out: &bytes.Buffer{}, + ErrOut: &bytes.Buffer{}, + } + err = executor(k.fact, ioStreams, manifestFile.Name()) + if err != nil { + return "", errors.New(cleanKubectlOutput(err.Error())) + } + if buf := strings.TrimSpace(ioStreams.Out.(*bytes.Buffer).String()); len(buf) > 0 { + out = append(out, buf) + } + if buf := strings.TrimSpace(ioStreams.ErrOut.(*bytes.Buffer).String()); len(buf) > 0 { + out = append(out, buf) + } + return strings.Join(out, ". "), nil +} + +// rbacReconcile will perform reconciliation for RBAC resources. 
It will run +// the following command: +// +// kubectl auth reconcile +// +// This is preferred over `kubectl apply`, which cannot tolerate changes in +// roleRef, which is an immutable field. +// See: https://github.com/kubernetes/kubernetes/issues/66353 +// `auth reconcile` will delete and recreate the resource if necessary +func (k *kubectlResourceOperations) rbacReconcile(ctx context.Context, obj *unstructured.Unstructured, fileName string, dryRunStrategy cmdutil.DryRunStrategy) (string, error) { + cleanup, err := k.processKubectlRun("auth") + if err != nil { + return "", fmt.Errorf("error processing kubectl run auth: %w", err) + } + defer cleanup() + outReconcile, err := k.authReconcile(ctx, obj, fileName, dryRunStrategy) + if err != nil { + return "", fmt.Errorf("error running kubectl auth reconcile: %w", err) + } + return outReconcile, nil +} + +func kubeCmdFactory(kubeconfig, ns string, config *rest.Config) cmdutil.Factory { + kubeConfigFlags := genericclioptions.NewConfigFlags(true) + if ns != "" { + kubeConfigFlags.Namespace = &ns + } + kubeConfigFlags.KubeConfig = &kubeconfig + kubeConfigFlags.WithDiscoveryBurst(config.Burst) + kubeConfigFlags.WithDiscoveryQPS(config.QPS) + kubeConfigFlags.Impersonate = &config.Impersonate.UserName + kubeConfigFlags.ImpersonateUID = &config.Impersonate.UID + kubeConfigFlags.ImpersonateGroup = &config.Impersonate.Groups + matchVersionKubeConfigFlags := cmdutil.NewMatchVersionFlags(kubeConfigFlags) + return cmdutil.NewFactory(matchVersionKubeConfigFlags) +} + +func (k *kubectlResourceOperations) ReplaceResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force bool) (string, error) { + span := k.tracer.StartSpan("ReplaceResource") + span.SetBaggageItem("kind", obj.GetKind()) + span.SetBaggageItem("name", obj.GetName()) + defer span.Finish() + k.log.Info(fmt.Sprintf("Replacing resource %s/%s in cluster: %s, namespace: %s", obj.GetKind(), obj.GetName(), k.config.Host, obj.GetNamespace())) + return k.runResourceCommand(ctx, obj, dryRunStrategy, false, func(f cmdutil.Factory, ioStreams genericclioptions.IOStreams, fileName string) error { + cleanup, err := k.processKubectlRun("replace") + if err != nil { + return err + } + defer cleanup() + + replaceOptions, err := k.newReplaceOptions(k.config, f, ioStreams, fileName, obj.GetNamespace(), force, dryRunStrategy) + if err != nil { + return err + } + return replaceOptions.Run(f) + }) +} + +func (k *kubectlResourceOperations) CreateResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, validate bool) (string, error) { + gvk := obj.GroupVersionKind() + span := k.tracer.StartSpan("CreateResource") + span.SetBaggageItem("kind", gvk.Kind) + span.SetBaggageItem("name", obj.GetName()) + defer span.Finish() + return k.runResourceCommand(ctx, obj, dryRunStrategy, false, func(f cmdutil.Factory, ioStreams genericclioptions.IOStreams, fileName string) error { + cleanup, err := k.processKubectlRun("create") + if err != nil { + return err + } + defer cleanup() + + createOptions, err := k.newCreateOptions(ioStreams, fileName, dryRunStrategy) + if err != nil { + return err + } + command := &cobra.Command{} + saveConfig := false + command.Flags().BoolVar(&saveConfig, "save-config", false, "") + val := false + command.Flags().BoolVar(&val, "validate", false, "") + if validate { + _ = command.Flags().Set("validate", "true") + } + + return createOptions.RunCreate(f, command) + }) +} + +func (k *kubectlResourceOperations) 
UpdateResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy) (*unstructured.Unstructured, error) { + gvk := obj.GroupVersionKind() + span := k.tracer.StartSpan("UpdateResource") + span.SetBaggageItem("kind", gvk.Kind) + span.SetBaggageItem("name", obj.GetName()) + defer span.Finish() + dynamicIf, err := dynamic.NewForConfig(k.config) + if err != nil { + return nil, err + } + disco, err := discovery.NewDiscoveryClientForConfig(k.config) + if err != nil { + return nil, err + } + apiResource, err := ServerResourceForGroupVersionKind(disco, gvk, "update") + if err != nil { + return nil, err + } + resource := gvk.GroupVersion().WithResource(apiResource.Name) + resourceIf := ToResourceInterface(dynamicIf, apiResource, resource, obj.GetNamespace()) + + updateOptions := metav1.UpdateOptions{} + switch dryRunStrategy { + case cmdutil.DryRunClient, cmdutil.DryRunServer: + updateOptions.DryRun = []string{metav1.DryRunAll} + } + return resourceIf.Update(ctx, obj, updateOptions) +} + +// ApplyResource performs an apply of a unstructured resource +func (k *kubectlResourceOperations) ApplyResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force, validate, serverSideApply bool, manager string, serverSideDiff bool) (string, error) { + span := k.tracer.StartSpan("ApplyResource") + span.SetBaggageItem("kind", obj.GetKind()) + span.SetBaggageItem("name", obj.GetName()) + defer span.Finish() + k.log.WithValues( + "dry-run", [...]string{"none", "client", "server"}[dryRunStrategy], + "manager", manager, + "serverSideApply", serverSideApply, + "serverSideDiff", serverSideDiff).Info(fmt.Sprintf("Applying resource %s/%s in cluster: %s, namespace: %s", obj.GetKind(), obj.GetName(), k.config.Host, obj.GetNamespace())) + + return k.runResourceCommand(ctx, obj, dryRunStrategy, serverSideDiff, func(f cmdutil.Factory, ioStreams genericclioptions.IOStreams, fileName string) error { + cleanup, err := k.processKubectlRun("apply") + if err != nil { + return err + } + defer cleanup() + + applyOpts, err := k.newApplyOptions(ioStreams, obj, fileName, validate, force, serverSideApply, dryRunStrategy, manager, serverSideDiff) + if err != nil { + return err + } + return applyOpts.Run() + }) +} + +func (k *kubectlResourceOperations) newApplyOptions(ioStreams genericclioptions.IOStreams, obj *unstructured.Unstructured, fileName string, validate bool, force, serverSideApply bool, dryRunStrategy cmdutil.DryRunStrategy, manager string, serverSideDiff bool) (*apply.ApplyOptions, error) { + flags := apply.NewApplyFlags(ioStreams) + o := &apply.ApplyOptions{ + IOStreams: ioStreams, + VisitedUids: sets.Set[types.UID]{}, + VisitedNamespaces: sets.Set[string]{}, + Recorder: genericclioptions.NoopRecorder{}, + PrintFlags: flags.PrintFlags, + Overwrite: true, + OpenAPIPatch: true, + ServerSideApply: serverSideApply, + } + dynamicClient, err := dynamic.NewForConfig(k.config) + if err != nil { + return nil, err + } + o.DynamicClient = dynamicClient + o.DeleteOptions, err = delete.NewDeleteFlags("").ToOptions(dynamicClient, ioStreams) + if err != nil { + return nil, err + } + o.OpenAPIGetter = k.fact + o.DryRunStrategy = dryRunStrategy + o.FieldManager = manager + validateDirective := metav1.FieldValidationIgnore + if validate { + validateDirective = metav1.FieldValidationStrict + } + o.Validator, err = k.fact.Validator(validateDirective) + if err != nil { + return nil, err + } + o.Builder = k.fact.NewBuilder() + o.Mapper, err = k.fact.ToRESTMapper() + 
if err != nil { + return nil, err + } + + o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { + o.PrintFlags.NamePrintFlags.Operation = operation + switch o.DryRunStrategy { + case cmdutil.DryRunClient: + err = o.PrintFlags.Complete("%s (dry run)") + if err != nil { + return nil, err + } + case cmdutil.DryRunServer: + if serverSideDiff { + // managedFields are required by server-side diff to identify + // changes made by mutation webhooks. + o.PrintFlags.JSONYamlPrintFlags.ShowManagedFields = true + p, err := o.PrintFlags.JSONYamlPrintFlags.ToPrinter("json") + if err != nil { + return nil, fmt.Errorf("error configuring server-side diff printer: %w", err) + } + return p, nil + } else { + err = o.PrintFlags.Complete("%s (server dry run)") + if err != nil { + return nil, fmt.Errorf("error configuring server dryrun printer: %w", err) + } + } + } + return o.PrintFlags.ToPrinter() + } + o.DeleteOptions.FilenameOptions.Filenames = []string{fileName} + o.Namespace = obj.GetNamespace() + o.DeleteOptions.ForceDeletion = force + o.DryRunStrategy = dryRunStrategy + if manager != "" { + o.FieldManager = manager + } + if serverSideApply || serverSideDiff { + o.ForceConflicts = true + } + return o, nil +} + +func (k *kubectlResourceOperations) newCreateOptions(ioStreams genericclioptions.IOStreams, fileName string, dryRunStrategy cmdutil.DryRunStrategy) (*create.CreateOptions, error) { + o := create.NewCreateOptions(ioStreams) + + recorder, err := o.RecordFlags.ToRecorder() + if err != nil { + return nil, err + } + o.Recorder = recorder + + switch dryRunStrategy { + case cmdutil.DryRunClient: + err = o.PrintFlags.Complete("%s (dry run)") + if err != nil { + return nil, err + } + case cmdutil.DryRunServer: + err = o.PrintFlags.Complete("%s (server dry run)") + if err != nil { + return nil, err + } + } + o.DryRunStrategy = dryRunStrategy + + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return nil, err + } + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + o.FilenameOptions.Filenames = []string{fileName} + return o, nil +} + +func (k *kubectlResourceOperations) newReplaceOptions(config *rest.Config, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, fileName string, namespace string, force bool, dryRunStrategy cmdutil.DryRunStrategy) (*replace.ReplaceOptions, error) { + o := replace.NewReplaceOptions(ioStreams) + + recorder, err := o.RecordFlags.ToRecorder() + if err != nil { + return nil, err + } + o.Recorder = recorder + + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + + o.DeleteOptions, err = o.DeleteFlags.ToOptions(dynamicClient, o.IOStreams) + if err != nil { + return nil, err + } + + o.Builder = func() *resource.Builder { + return f.NewBuilder() + } + + switch dryRunStrategy { + case cmdutil.DryRunClient: + err = o.PrintFlags.Complete("%s (dry run)") + if err != nil { + return nil, err + } + case cmdutil.DryRunServer: + err = o.PrintFlags.Complete("%s (server dry run)") + if err != nil { + return nil, err + } + } + o.DryRunStrategy = dryRunStrategy + + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return nil, err + } + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + + o.DeleteOptions.FilenameOptions.Filenames = []string{fileName} + o.Namespace = namespace + o.DeleteOptions.ForceDeletion = force + return o, nil +} + +func newReconcileOptions(f cmdutil.Factory, kubeClient *kubernetes.Clientset, fileName string, 
ioStreams genericclioptions.IOStreams, namespace string, dryRunStrategy cmdutil.DryRunStrategy) (*auth.ReconcileOptions, error) { + o := auth.NewReconcileOptions(ioStreams) + o.RBACClient = kubeClient.RbacV1() + o.NamespaceClient = kubeClient.CoreV1() + o.FilenameOptions.Filenames = []string{fileName} + o.DryRun = dryRunStrategy != cmdutil.DryRunNone + + r := f.NewBuilder(). + WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). + NamespaceParam(namespace).DefaultNamespace(). + FilenameParam(false, o.FilenameOptions). + Flatten(). + Do() + o.Visitor = r + + if o.DryRun { + err := o.PrintFlags.Complete("%s (dry run)") + if err != nil { + return nil, err + } + } + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return nil, err + } + o.PrintObject = printer.PrintObj + return o, nil +} + +func (k *kubectlResourceOperations) authReconcile(ctx context.Context, obj *unstructured.Unstructured, manifestFile string, dryRunStrategy cmdutil.DryRunStrategy) (string, error) { + kubeClient, err := kubernetes.NewForConfig(k.config) + if err != nil { + return "", err + } + // `kubectl auth reconcile` has a side effect of auto-creating namespaces if it doesn't exist. + // See: https://github.com/kubernetes/kubernetes/issues/71185. This is behavior which we do + // not want. We need to check if the namespace exists, before know if it is safe to run this + // command. Skip this for dryRuns. + if dryRunStrategy == cmdutil.DryRunNone && obj.GetNamespace() != "" { + _, err = kubeClient.CoreV1().Namespaces().Get(ctx, obj.GetNamespace(), metav1.GetOptions{}) + if err != nil { + return "", err + } + } + ioStreams := genericclioptions.IOStreams{ + In: &bytes.Buffer{}, + Out: &bytes.Buffer{}, + ErrOut: &bytes.Buffer{}, + } + reconcileOpts, err := newReconcileOptions(k.fact, kubeClient, manifestFile, ioStreams, obj.GetNamespace(), dryRunStrategy) + if err != nil { + return "", fmt.Errorf("error calling newReconcileOptions: %w", err) + } + err = reconcileOpts.Validate() + if err != nil { + return "", errors.New(cleanKubectlOutput(err.Error())) + } + err = reconcileOpts.RunReconcile() + if err != nil { + return "", errors.New(cleanKubectlOutput(err.Error())) + } + + var out []string + if buf := strings.TrimSpace(ioStreams.Out.(*bytes.Buffer).String()); len(buf) > 0 { + out = append(out, buf) + } + if buf := strings.TrimSpace(ioStreams.ErrOut.(*bytes.Buffer).String()); len(buf) > 0 { + out = append(out, buf) + } + return strings.Join(out, ". "), nil +} + +func (k *kubectlResourceOperations) processKubectlRun(cmd string) (CleanupFunc, error) { + if k.onKubectlRun != nil { + return k.onKubectlRun(cmd) + } + return func() {}, nil +} diff --git a/pkg/gitops-engine/pkg/utils/kube/uniqueprotomodels.go b/pkg/gitops-engine/pkg/utils/kube/uniqueprotomodels.go new file mode 100644 index 0000000..93013d4 --- /dev/null +++ b/pkg/gitops-engine/pkg/utils/kube/uniqueprotomodels.go @@ -0,0 +1,191 @@ +package kube + +import ( + "fmt" + "sort" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/util/proto" +) + +/** +The upstream Kubernetes NewGVKParser method causes problems for Argo CD. +https://github.com/kubernetes/apimachinery/blob/eb26334eeb0f769be8f0c5665ff34713cfdec83e/pkg/util/managedfields/gvkparser.go#L73 + +The function fails in instances where it is probably more desirable for Argo CD to simply ignore the error and move on. 
+But since the upstream implementation doesn't offer the option to ignore the error, we have to mutate the input to the +function to completely avoid the case that can produce the error. + +When encountering the error from NewGVKParser, we used to just set the internal GVKParser instance to nil, log the +error as info, and move on. + +But Argo CD increasingly relies on the GVKParser to produce reliable diffs, especially with server-side diffing. And +we're better off with an incorrectly-initialized GVKParser than no GVKParser at all. + +To understand why NewGVKParser fails, we need to understand how Kubernetes constructs its OpenAPI models. + +Kubernetes contains a built-in OpenAPI document containing the `definitions` for every built-in Kubernetes API. This +document includes shared structs like APIResourceList. Some of these definitions include an +x-kubernetes-group-version-kind extension. + +Aggregated APIs produce their own OpenAPI documents, which are merged with the built-in OpenAPI document. The aggregated +API documents generally include all the definitions of all the structs which are used anywhere by the API. This often +includes some of the same structs as the built-in OpenAPI document. + +So when Kubernetes constructs the complete OpenAPI document (the one served at /openapi/v2), it merges the built-in +OpenAPI document with the aggregated API OpenAPI documents. + +When the aggregator encounters two different definitions for the same struct (as determined by a deep compare) with the +same GVK (as determined by the value in the x-kubernetes-group-version-kind extension), it appends a `_vX` suffix to the +definition name in the OpenAPI document (where X is the count of the number of times the aggregator has seen the same +definition). Basically, it's communicating "different APIs have different opinions about the structure of structs with +this GVK, so I'm going to give them different names and let you sort it out." +https://github.com/kubernetes/kube-openapi/blob/b456828f718bab62dc3013d192665eb3d17f8fe9/pkg/aggregator/aggregator.go#L238-L279 + +This behavior is fine from the perspective of a typical Kubernetes API user. They download the OpenAPI document, they +see that there are two different "opinions" about the structure of a struct, and they can choose which one they want to +rely on. + +But Argo CD has to be generic. We need to take the provided OpenAPI document and use it to construct a GVKParser. And +the GVKParser (reasonably) rejects the OpenAPI document if it contains two definitions for the same struct. + +So we have to do some work to make the OpenAPI document palatable to the GVKParser. We have to remove the duplicate +definitions. Specifically, we take the first one and log a warning for each subsequent definition with the same GVK. + +In practice, this probably generally appears when a common aggregated API was built at a time significantly before the +current Kubernetes version. The most common case is that the metrics server is built against an older version of the +Kubernetes libraries, using old versions of the structs. When the metrics server is updated to use the latest version of +the Kubernetes libraries, the problems go away, because the aggregated API and Kubernetes agree about the shape of the +struct. + +Using the first encountered definition is imperfect and could result in unreliable diffs. But it's better than +constructing completely-wrong diffs due to the lack of a GVKParser. 
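+
+For illustration only (hypothetical definition names, not taken from a real cluster), a merged
+OpenAPI document might contain two definitions carrying the same x-kubernetes-group-version-kind
+extension:
+
+	"io.k8s.api.autoscaling.v2beta1.MetricSpec"    // group: autoscaling, version: v2beta1, kind: MetricSpec
+	"io.k8s.api.autoscaling.v2beta1.MetricSpec_v2" // same GVK, renamed by the aggregator with a _v2 suffix
+
+newUniqueModels keeps whichever definition it encounters first, records the GVK of the later
+duplicate in the returned taintedGVKs slice, and the caller logs those GVKs as a warning.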
+*/ + +// uniqueModels is a model provider that ensures that no two models share the same gvk. Use newUniqueModels to +// initialize it and enforce uniqueness. +type uniqueModels struct { + models map[string]proto.Schema +} + +// LookupModel is public through the interface of Models. It +// returns a visitable schema from the given model name. +// Copied verbatim from here: https://github.com/kubernetes/kube-openapi/blob/b456828f718bab62dc3013d192665eb3d17f8fe9/pkg/util/proto/document.go#L322-L326 +func (d *uniqueModels) LookupModel(model string) proto.Schema { + return d.models[model] +} + +// Copied verbatim from here: https://github.com/kubernetes/kube-openapi/blob/b456828f718bab62dc3013d192665eb3d17f8fe9/pkg/util/proto/document.go#L328-L337 +func (d *uniqueModels) ListModels() []string { + models := []string{} + + for model := range d.models { + models = append(models, model) + } + + sort.Strings(models) + return models +} + +// newUniqueModels returns a new uniqueModels instance and a list of warnings for models that share the same gvk. +func newUniqueModels(models proto.Models) (proto.Models, []schema.GroupVersionKind) { + var taintedGVKs []schema.GroupVersionKind + gvks := map[schema.GroupVersionKind]string{} + um := &uniqueModels{models: map[string]proto.Schema{}} + for _, modelName := range models.ListModels() { + model := models.LookupModel(modelName) + if model == nil { + panic(fmt.Sprintf("ListModels returns a model that can't be looked-up for: %v", modelName)) + } + gvkList := parseGroupVersionKind(model) + gvk, wasProcessed := modelGvkWasAlreadyProcessed(model, gvks) + if !wasProcessed { + um.models[modelName] = model + + // Add GVKs to the map, so we can detect a duplicate GVK later. + for _, gvk := range gvkList { + if len(gvk.Kind) > 0 { + gvks[gvk] = modelName + } + } + } else { + taintedGVKs = append(taintedGVKs, gvk) + } + } + return um, taintedGVKs +} + +// modelGvkWasAlreadyProcessed inspects a model to determine if it would trigger a duplicate GVK error. The gvks map +// holds the GVKs of all the models that have already been processed. If the model would trigger a duplicate GVK error, +// the function returns the GVK that would trigger the error and true. Otherwise, it returns an empty GVK and false. +func modelGvkWasAlreadyProcessed(model proto.Schema, gvks map[schema.GroupVersionKind]string) (schema.GroupVersionKind, bool) { + gvkList := parseGroupVersionKind(model) + // Not every model has a GVK extension specified. For those models, this loop will be skipped. + for _, gvk := range gvkList { + // The kind length check is copied from managedfields.NewGVKParser. It's unclear what edge case it's handling, + // but the behavior of this function should match NewGVKParser. + if len(gvk.Kind) > 0 { + _, ok := gvks[gvk] + if ok { + // This is the only condition under which NewGVKParser would return a duplicate GVK error. + return gvk, true + } + } + } + return schema.GroupVersionKind{}, false +} + +// groupVersionKindExtensionKey is the key used to lookup the +// GroupVersionKind value for an object definition from the +// definition's "extensions" map. +// Copied verbatim from: https://github.com/kubernetes/apimachinery/blob/eb26334eeb0f769be8f0c5665ff34713cfdec83e/pkg/util/managedfields/gvkparser.go#L29-L32 +const groupVersionKindExtensionKey = "x-kubernetes-group-version-kind" + +// parseGroupVersionKind gets and parses GroupVersionKind from the extension. Returns empty if it doesn't have one. 
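+// The extension value this function expects looks roughly like (illustrative shape only):
+//
+//	x-kubernetes-group-version-kind:
+//	- group: "apps"
+//	  version: "v1"
+//	  kind: "Deployment"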
+// Copied verbatim from: https://github.com/kubernetes/apimachinery/blob/eb26334eeb0f769be8f0c5665ff34713cfdec83e/pkg/util/managedfields/gvkparser.go#L82-L128 +func parseGroupVersionKind(s proto.Schema) []schema.GroupVersionKind { + extensions := s.GetExtensions() + + gvkListResult := []schema.GroupVersionKind{} + + // Get the extensions + gvkExtension, ok := extensions[groupVersionKindExtensionKey] + if !ok { + return []schema.GroupVersionKind{} + } + + // gvk extension must be a list of at least 1 element. + gvkList, ok := gvkExtension.([]interface{}) + if !ok { + return []schema.GroupVersionKind{} + } + + for _, gvk := range gvkList { + // gvk extension list must be a map with group, version, and + // kind fields + gvkMap, ok := gvk.(map[interface{}]interface{}) + if !ok { + continue + } + group, ok := gvkMap["group"].(string) + if !ok { + continue + } + version, ok := gvkMap["version"].(string) + if !ok { + continue + } + kind, ok := gvkMap["kind"].(string) + if !ok { + continue + } + + gvkListResult = append(gvkListResult, schema.GroupVersionKind{ + Group: group, + Version: version, + Kind: kind, + }) + } + + return gvkListResult +} diff --git a/pkg/kubectl/apply/apply.go b/pkg/kubectl/apply/apply.go new file mode 100644 index 0000000..6ea921e --- /dev/null +++ b/pkg/kubectl/apply/apply.go @@ -0,0 +1,1131 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apply + +import ( + "context" + "fmt" + "io" + "net/http" + + "github.com/spf13/cobra" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/openapi3" + "k8s.io/client-go/util/csaupgrade" + "k8s.io/component-base/version" + "k8s.io/klog/v2" + cmddelete "k8s.io/kubectl/pkg/cmd/delete" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/openapi" + "k8s.io/kubectl/pkg/util/prune" + "k8s.io/kubectl/pkg/util/templates" + "k8s.io/kubectl/pkg/validation" +) + +// ApplyFlags directly reflect the information that CLI is gathering via flags. They will be converted to Options, which +// reflect the runtime requirements for the command. 
This structure reduces the transformation to wiring and makes +// the logic itself easy to unit test +type ApplyFlags struct { + RecordFlags *genericclioptions.RecordFlags + PrintFlags *genericclioptions.PrintFlags + + DeleteFlags *cmddelete.DeleteFlags + + FieldManager string + Selector string + Prune bool + PruneResources []prune.Resource + ApplySetRef string + All bool + Overwrite bool + OpenAPIPatch bool + Subresource string + + PruneAllowlist []string + + genericiooptions.IOStreams +} + +// ApplyOptions defines flags and other configuration parameters for the `apply` command +type ApplyOptions struct { + Recorder genericclioptions.Recorder + + PrintFlags *genericclioptions.PrintFlags + ToPrinter func(string) (printers.ResourcePrinter, error) + + DeleteOptions *cmddelete.DeleteOptions + + ServerSideApply bool + ForceConflicts bool + FieldManager string + Selector string + DryRunStrategy cmdutil.DryRunStrategy + Prune bool + PruneResources []prune.Resource + cmdBaseName string + All bool + Overwrite bool + OpenAPIPatch bool + Subresource string + + ValidationDirective string + Validator validation.Schema + Builder *resource.Builder + Mapper meta.RESTMapper + DynamicClient dynamic.Interface + OpenAPIGetter openapi.OpenAPIResourcesGetter + OpenAPIV3Root openapi3.Root + + Namespace string + EnforceNamespace bool + + genericiooptions.IOStreams + + // Objects (and some denormalized data) which are to be + // applied. The standard way to fill in this structure + // is by calling "GetObjects()", which will use the + // resource builder if "objectsCached" is false. The other + // way to set this field is to use "SetObjects()". + // Subsequent calls to "GetObjects()" after setting would + // not call the resource builder; only return the set objects. + objects []*resource.Info + objectsCached bool + + // Stores visited objects/namespaces for later use + // calculating the set of objects to prune. + VisitedUids sets.Set[types.UID] + VisitedNamespaces sets.Set[string] + + // Function run after the objects are generated and + // stored in the "objects" field, but before the + // apply is run on these objects. + PreProcessorFn func() error + // Function run after all objects have been applied. + // The standard PostProcessorFn is "PrintAndPrunePostProcessor()". + PostProcessorFn func() error + + // ApplySet tracks the set of objects that have been applied, for the purposes of pruning. + // See git.k8s.io/enhancements/keps/sig-cli/3659-kubectl-apply-prune + ApplySet *ApplySet +} + +var ( + applyLong = templates.LongDesc(i18n.T(` + Apply a configuration to a resource by file name or stdin. + The resource name must be specified. This resource will be created if it doesn't exist yet. + To use 'apply', always create the resource initially with either 'apply' or 'create --save-config'. + + JSON and YAML formats are accepted. + + Alpha Disclaimer: the --prune functionality is not yet complete. Do not use unless you are aware of what the current state is. See https://issues.k8s.io/34274.`)) + + applyExample = templates.Examples(i18n.T(` + # Apply the configuration in pod.json to a pod + kubectl apply -f ./pod.json + + # Apply resources from a directory containing kustomization.yaml - e.g. 
dir/kustomization.yaml + kubectl apply -k dir/ + + # Apply the JSON passed into stdin to a pod + cat pod.json | kubectl apply -f - + + # Apply the configuration from all files that end with '.json' + kubectl apply -f '*.json' + + # Note: --prune is still in Alpha + # Apply the configuration in manifest.yaml that matches label app=nginx and delete all other resources that are not in the file and match label app=nginx + kubectl apply --prune -f manifest.yaml -l app=nginx + + # Apply the configuration in manifest.yaml and delete all the other config maps that are not in the file + kubectl apply --prune -f manifest.yaml --all --prune-allowlist=core/v1/ConfigMap`)) + + warningNoLastAppliedConfigAnnotation = "Warning: resource %[1]s is missing the %[2]s annotation which is required by %[3]s apply. %[3]s apply should only be used on resources created declaratively by either %[3]s create --save-config or %[3]s apply. The missing annotation will be patched automatically.\n" + warningChangesOnDeletingResource = "Warning: Detected changes to resource %[1]s which is currently being deleted.\n" + warningMigrationLastAppliedFailed = "Warning: failed to migrate kubectl.kubernetes.io/last-applied-configuration for Server-Side Apply. This is non-fatal and will be retried next time you apply. Error: %[1]s\n" + warningMigrationPatchFailed = "Warning: server rejected managed fields migration to Server-Side Apply. This is non-fatal and will be retried next time you apply. Error: %[1]s\n" + warningMigrationReapplyFailed = "Warning: failed to re-apply configuration after performing Server-Side Apply migration. This is non-fatal and will be retried next time you apply. Error: %[1]s\n" +) + +var ApplySetToolVersion = version.Get().GitVersion + +// NewApplyFlags returns a default ApplyFlags +func NewApplyFlags(streams genericiooptions.IOStreams) *ApplyFlags { + return &ApplyFlags{ + RecordFlags: genericclioptions.NewRecordFlags(), + DeleteFlags: cmddelete.NewDeleteFlags("The files that contain the configurations to apply."), + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + + Overwrite: true, + OpenAPIPatch: true, + + IOStreams: streams, + } +} + +// NewCmdApply creates the `apply` command +func NewCmdApply(baseName string, f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + flags := NewApplyFlags(ioStreams) + + cmd := &cobra.Command{ + Use: "apply (-f FILENAME | -k DIRECTORY)", + DisableFlagsInUseLine: true, + Short: i18n.T("Apply a configuration to a resource by file name or stdin"), + Long: applyLong, + Example: applyExample, + Run: func(cmd *cobra.Command, args []string) { + o, err := flags.ToOptions(f, cmd, baseName, args) + cmdutil.CheckErr(err) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + } + + flags.AddFlags(cmd) + + // apply subcommands + cmd.AddCommand(NewCmdApplyViewLastApplied(f, flags.IOStreams)) + cmd.AddCommand(NewCmdApplySetLastApplied(f, flags.IOStreams)) + cmd.AddCommand(NewCmdApplyEditLastApplied(f, flags.IOStreams)) + + return cmd +} + +// AddFlags registers flags for a cli +func (flags *ApplyFlags) AddFlags(cmd *cobra.Command) { + // bind flag structs + flags.DeleteFlags.AddFlags(cmd) + flags.RecordFlags.AddFlags(cmd) + flags.PrintFlags.AddFlags(cmd) + + cmdutil.AddValidateFlags(cmd) + cmdutil.AddDryRunFlag(cmd) + cmdutil.AddServerSideApplyFlags(cmd) + cmdutil.AddFieldManagerFlagVar(cmd, &flags.FieldManager, FieldManagerClientSideApply) + cmdutil.AddLabelSelectorFlagVar(cmd, &flags.Selector) + 
cmdutil.AddPruningFlags(cmd, &flags.Prune, &flags.PruneAllowlist, &flags.All, &flags.ApplySetRef) + cmd.Flags().BoolVar(&flags.Overwrite, "overwrite", flags.Overwrite, "Automatically resolve conflicts between the modified and live configuration by using values from the modified configuration") + cmd.Flags().BoolVar(&flags.OpenAPIPatch, "openapi-patch", flags.OpenAPIPatch, "If true, use openapi to calculate diff when the openapi presents and the resource can be found in the openapi spec. Otherwise, fall back to use baked-in types.") + cmdutil.AddSubresourceFlags(cmd, &flags.Subresource, "If specified, apply will operate on the subresource of the requested object. Only allowed when using --server-side.") +} + +// ToOptions converts from CLI inputs to runtime inputs +func (flags *ApplyFlags) ToOptions(f cmdutil.Factory, cmd *cobra.Command, baseName string, args []string) (*ApplyOptions, error) { + if len(args) != 0 { + return nil, cmdutil.UsageErrorf(cmd, "Unexpected args: %v", args) + } + + serverSideApply := cmdutil.GetServerSideApplyFlag(cmd) + forceConflicts := cmdutil.GetForceConflictsFlag(cmd) + dryRunStrategy, err := cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return nil, err + } + + dynamicClient, err := f.DynamicClient() + if err != nil { + return nil, err + } + + fieldManager := GetApplyFieldManagerFlag(cmd, serverSideApply) + + // allow for a success message operation to be specified at print time + toPrinter := func(operation string) (printers.ResourcePrinter, error) { + flags.PrintFlags.NamePrintFlags.Operation = operation + cmdutil.PrintFlagsWithDryRunStrategy(flags.PrintFlags, dryRunStrategy) + return flags.PrintFlags.ToPrinter() + } + + flags.RecordFlags.Complete(cmd) + recorder, err := flags.RecordFlags.ToRecorder() + if err != nil { + return nil, err + } + + deleteOptions, err := flags.DeleteFlags.ToOptions(dynamicClient, flags.IOStreams) + if err != nil { + return nil, err + } + + err = deleteOptions.FilenameOptions.RequireFilenameOrKustomize() + if err != nil { + return nil, err + } + + var openAPIV3Root openapi3.Root + if !cmdutil.OpenAPIV3Patch.IsDisabled() { + openAPIV3Client, err := f.OpenAPIV3Client() + if err == nil { + openAPIV3Root = openapi3.NewRoot(openAPIV3Client) + } else { + klog.V(4).Infof("warning: OpenAPI V3 Patch is enabled but is unable to be loaded. Will fall back to OpenAPI V2") + } + } + + validationDirective, err := cmdutil.GetValidationDirective(cmd) + if err != nil { + return nil, err + } + validator, err := f.Validator(validationDirective) + if err != nil { + return nil, err + } + builder := f.NewBuilder() + mapper, err := f.ToRESTMapper() + if err != nil { + return nil, err + } + + namespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return nil, err + } + + var applySet *ApplySet + if flags.ApplySetRef != "" { + parent, err := ParseApplySetParentRef(flags.ApplySetRef, mapper) + if err != nil { + return nil, fmt.Errorf("invalid parent reference %q: %w", flags.ApplySetRef, err) + } + // ApplySet uses the namespace value from the flag, but not from the kubeconfig or defaults + // This means the namespace flag is required when using a namespaced parent. 
+ if enforceNamespace && parent.IsNamespaced() { + parent.Namespace = namespace + } + tooling := ApplySetTooling{Name: baseName, Version: ApplySetToolVersion} + restClient, err := f.UnstructuredClientForMapping(parent.RESTMapping) + if err != nil { + return nil, fmt.Errorf("failed to initialize RESTClient for ApplySet: %w", err) + } + if restClient == nil { + return nil, fmt.Errorf("could not build RESTClient for ApplySet") + } + applySet = NewApplySet(parent, tooling, mapper, restClient) + } + if flags.Prune { + flags.PruneResources, err = prune.ParseResources(mapper, flags.PruneAllowlist) + if err != nil { + return nil, err + } + } + + o := &ApplyOptions{ + // Store baseName for use in printing warnings / messages involving the base command name. + // This is useful for downstream command that wrap this one. + cmdBaseName: baseName, + + PrintFlags: flags.PrintFlags, + + DeleteOptions: deleteOptions, + ToPrinter: toPrinter, + ServerSideApply: serverSideApply, + ForceConflicts: forceConflicts, + FieldManager: fieldManager, + Selector: flags.Selector, + DryRunStrategy: dryRunStrategy, + Prune: flags.Prune, + PruneResources: flags.PruneResources, + All: flags.All, + Overwrite: flags.Overwrite, + OpenAPIPatch: flags.OpenAPIPatch, + Subresource: flags.Subresource, + + Recorder: recorder, + Namespace: namespace, + EnforceNamespace: enforceNamespace, + Validator: validator, + ValidationDirective: validationDirective, + Builder: builder, + Mapper: mapper, + DynamicClient: dynamicClient, + OpenAPIGetter: f, + OpenAPIV3Root: openAPIV3Root, + + IOStreams: flags.IOStreams, + + objects: []*resource.Info{}, + objectsCached: false, + + VisitedUids: sets.New[types.UID](), + VisitedNamespaces: sets.New[string](), + + ApplySet: applySet, + } + + o.PostProcessorFn = o.PrintAndPrunePostProcessor() + + return o, nil +} + +// Validate verifies if ApplyOptions are valid and without conflicts. +func (o *ApplyOptions) Validate() error { + if o.ForceConflicts && !o.ServerSideApply { + return fmt.Errorf("--force-conflicts only works with --server-side") + } + + if o.DryRunStrategy == cmdutil.DryRunClient && o.ServerSideApply { + return fmt.Errorf("--dry-run=client doesn't work with --server-side (did you mean --dry-run=server instead?)") + } + + if o.ServerSideApply && o.DeleteOptions.ForceDeletion { + return fmt.Errorf("--force cannot be used with --server-side") + } + + if o.DryRunStrategy == cmdutil.DryRunServer && o.DeleteOptions.ForceDeletion { + return fmt.Errorf("--dry-run=server cannot be used with --force") + } + + if o.All && len(o.Selector) > 0 { + return fmt.Errorf("cannot set --all and --selector at the same time") + } + + if o.ApplySet != nil { + if !o.Prune { + return fmt.Errorf("--applyset requires --prune") + } + if err := o.ApplySet.Validate(context.TODO(), o.DynamicClient); err != nil { + return err + } + } + if o.Prune { + // Do not force the recreation of an object(s) if we're pruning; this can cause + // undefined behavior since object UID's change. + if o.DeleteOptions.ForceDeletion { + return fmt.Errorf("--force cannot be used with --prune") + } + + if o.ApplySet != nil { + if o.All { + return fmt.Errorf("--all is incompatible with --applyset") + } else if o.Selector != "" { + return fmt.Errorf("--selector is incompatible with --applyset") + } else if len(o.PruneResources) > 0 { + return fmt.Errorf("--prune-allowlist is incompatible with --applyset") + } + } else { + if !o.All && o.Selector == "" { + return fmt.Errorf("all resources selected for prune without explicitly passing --all. 
To prune all resources, pass the --all flag. If you did not mean to prune all resources, specify a label selector") + } + if o.ServerSideApply { + return fmt.Errorf("--prune is in alpha and doesn't currently work on objects created by server-side apply") + } + } + } + if len(o.Subresource) > 0 && !o.ServerSideApply { + return fmt.Errorf("--subresource can only be specified for --server-side") + } + + return nil +} + +func isIncompatibleServerError(err error) bool { + // 415: Unsupported media type means we're talking to a server which doesn't + // support server-side apply. + if _, ok := err.(*errors.StatusError); !ok { + // Non-StatusError means the error isn't because the server is incompatible. + return false + } + return err.(*errors.StatusError).Status().Code == http.StatusUnsupportedMediaType +} + +// GetObjects returns a (possibly cached) version of all the valid objects to apply +// as a slice of pointer to resource.Info and an error if one or more occurred. +// IMPORTANT: This function can return both valid objects AND an error, since +// "ContinueOnError" is set on the builder. This function should not be called +// until AFTER the "complete" and "validate" methods have been called to ensure that +// the ApplyOptions is filled in and valid. +func (o *ApplyOptions) GetObjects() ([]*resource.Info, error) { + var err error = nil + if !o.objectsCached { + r := o.Builder. + Unstructured(). + Schema(o.Validator). + ContinueOnError(). + NamespaceParam(o.Namespace).DefaultNamespace(). + FilenameParam(o.EnforceNamespace, &o.DeleteOptions.FilenameOptions). + LabelSelectorParam(o.Selector). + Flatten(). + Do() + + o.objects, err = r.Infos() + + if o.ApplySet != nil { + if err := o.ApplySet.AddLabels(o.objects...); err != nil { + return nil, err + } + } + + o.objectsCached = true + } + return o.objects, err +} + +// SetObjects stores the set of objects (as resource.Info) to be +// subsequently applied. +func (o *ApplyOptions) SetObjects(infos []*resource.Info) { + o.objects = infos + o.objectsCached = true +} + +// Run executes the `apply` command. +func (o *ApplyOptions) Run() error { + if o.PreProcessorFn != nil { + klog.V(4).Infof("Running apply pre-processor function") + if err := o.PreProcessorFn(); err != nil { + return err + } + } + + // Enforce CLI specified namespace on server request. + if o.EnforceNamespace { + o.VisitedNamespaces.Insert(o.Namespace) + } + + // Generates the objects using the resource builder if they have not + // already been stored by calling "SetObjects()" in the pre-processor. + errs := []error{} + infos, err := o.GetObjects() + if err != nil { + errs = append(errs, err) + } + if len(infos) == 0 && len(errs) == 0 { + return fmt.Errorf("no objects passed to apply") + } + + if o.ApplySet != nil { + if err := o.ApplySet.BeforeApply(infos, o.DryRunStrategy, o.ValidationDirective); err != nil { + return err + } + } + + // Iterate through all objects, applying each one. + for _, info := range infos { + if err := o.applyOneObject(info); err != nil { + errs = append(errs, err) + } + } + // If any errors occurred during apply, then return error (or + // aggregate of errors). 
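The branch that follows returns a single error unchanged and wraps multiple errors in an aggregate. As a rough illustration (error texts invented, exact aggregate formatting is up to `utilerrors`), this is what a caller ends up seeing in each case:

```go
// Illustrative only: the error messages below are made up.
single := []error{fmt.Errorf(`pods "web" is invalid`)}
multiple := []error{
	fmt.Errorf(`pods "web" is invalid`),
	fmt.Errorf(`deployments.apps "api" not found`),
}

fmt.Println(single[0])                         // pods "web" is invalid
fmt.Println(utilerrors.NewAggregate(multiple)) // both messages, flattened into one error value
```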
+ if len(errs) == 1 { + return errs[0] + } + if len(errs) > 1 { + return utilerrors.NewAggregate(errs) + } + + if o.PostProcessorFn != nil { + klog.V(4).Infof("Running apply post-processor function") + if err := o.PostProcessorFn(); err != nil { + return err + } + } + + return nil +} + +func (o *ApplyOptions) applyOneObject(info *resource.Info) error { + o.MarkNamespaceVisited(info) + + if err := o.Recorder.Record(info.Object); err != nil { + klog.V(4).Infof("error recording current command: %v", err) + } + + if len(info.Name) == 0 { + metadata, _ := meta.Accessor(info.Object) + generatedName := metadata.GetGenerateName() + if len(generatedName) > 0 { + return fmt.Errorf("from %s: cannot use generate name with apply", generatedName) + } + } + + helper := resource.NewHelper(info.Client, info.Mapping). + DryRun(o.DryRunStrategy == cmdutil.DryRunServer). + WithFieldManager(o.FieldManager). + WithFieldValidation(o.ValidationDirective) + + if o.ServerSideApply { + // Send the full object to be applied on the server side. + data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, info.Object) + if err != nil { + return cmdutil.AddSourceToErr("serverside-apply", info.Source, err) + } + + options := metav1.PatchOptions{ + Force: &o.ForceConflicts, + } + obj, err := helper. + WithSubresource(o.Subresource). + Patch( + info.Namespace, + info.Name, + types.ApplyPatchType, + data, + &options, + ) + if err != nil { + if isIncompatibleServerError(err) { + err = fmt.Errorf("Server-side apply not available on the server: (%v)", err) + } + if errors.IsConflict(err) { + err = fmt.Errorf(`%v +Please review the fields above--they currently have other managers. Here +are the ways you can resolve this warning: +* If you intend to manage all of these fields, please re-run the apply + command with the `+"`--force-conflicts`"+` flag. +* If you do not intend to manage all of the fields, please edit your + manifest to remove references to the fields that should keep their + current managers. +* You may co-own fields by updating your manifest to match the existing + value; in this case, you'll become the manager if the other manager(s) + stop managing the field (remove it from their configuration). +See https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts`, err) + } + return err + } + + info.Refresh(obj, true) + + // Migrate managed fields if necessary. + // + // By checking afterward instead of fetching the object beforehand and + // unconditionally fetching we can make 3 network requests in the rare + // case of migration and 1 request if migration is unnecessary. + // + // To check beforehand means 2 requests for most operations, and 3 + // requests in worst case. + if err = o.saveLastApplyAnnotationIfNecessary(helper, info); err != nil { + fmt.Fprintf(o.ErrOut, warningMigrationLastAppliedFailed, err.Error()) + } else if performedMigration, err := o.migrateToSSAIfNecessary(helper, info); err != nil { + // Print-error as a warning. + // This is a non-fatal error because object was successfully applied + // above, but it might have issues since migration failed. + // + // This migration will be re-attempted if necessary upon next + // apply. 
+ fmt.Fprintf(o.ErrOut, warningMigrationPatchFailed, err.Error()) + } else if performedMigration { + if obj, err = helper.Patch( + info.Namespace, + info.Name, + types.ApplyPatchType, + data, + &options, + ); err != nil { + // Re-send original SSA patch (this will allow dropped fields to + // finally be removed) + fmt.Fprintf(o.ErrOut, warningMigrationReapplyFailed, err.Error()) + } else { + info.Refresh(obj, false) + } + } + + WarnIfDeleting(info.Object, o.ErrOut) + + if err := o.MarkObjectVisited(info); err != nil { + return err + } + + if o.shouldPrintObject() { + return nil + } + + printer, err := o.ToPrinter("serverside-applied") + if err != nil { + return err + } + + if err = printer.PrintObj(info.Object, o.Out); err != nil { + return err + } + return nil + } + + // Get the modified configuration of the object. Embed the result + // as an annotation in the modified configuration, so that it will appear + // in the patch sent to the server. + modified, err := util.GetModifiedConfiguration(info.Object, true, unstructured.UnstructuredJSONScheme) + if err != nil { + return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving modified configuration from:\n%s\nfor:", info.String()), info.Source, err) + } + + if err := info.Get(); err != nil { + if !errors.IsNotFound(err) { + return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err) + } + + // Create the resource if it doesn't exist + // First, update the annotation used by kubectl apply + if err := util.CreateApplyAnnotation(info.Object, unstructured.UnstructuredJSONScheme); err != nil { + return cmdutil.AddSourceToErr("creating", info.Source, err) + } + + // prune nulls when client-side apply does a create to match what will happen when client-side applying an update. + // do this after CreateApplyAnnotation so the annotation matches what will be persisted on an update apply of the same manifest. 
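For a concrete sense of that pruning step, here is what `pruneNullsFromMap` (defined later in this file) does to a hypothetical unstructured payload: nil-valued keys are removed recursively from nested maps and slices.

```go
// Hypothetical object content before a client-side create.
content := map[string]interface{}{
	"spec": map[string]interface{}{
		"replicas": int64(2),
		"paused":   nil, // removed
	},
	"status": nil, // removed
}
pruneNullsFromMap(content)
// content now only holds spec.replicas; the nil entries are gone, matching
// what a later three-way-merge apply of the same manifest would persist.
```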
+ if u, ok := info.Object.(runtime.Unstructured); ok { + pruneNullsFromMap(u.UnstructuredContent()) + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + // Then create the resource and skip the three-way merge + obj, err := helper.Create(info.Namespace, true, info.Object) + if err != nil { + return cmdutil.AddSourceToErr("creating", info.Source, err) + } + info.Refresh(obj, true) + } + + if err := o.MarkObjectVisited(info); err != nil { + return err + } + + if o.shouldPrintObject() { + return nil + } + + printer, err := o.ToPrinter("created") + if err != nil { + return err + } + if err = printer.PrintObj(info.Object, o.Out); err != nil { + return err + } + return nil + } + + if err := o.MarkObjectVisited(info); err != nil { + return err + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + metadata, _ := meta.Accessor(info.Object) + annotationMap := metadata.GetAnnotations() + if _, ok := annotationMap[corev1.LastAppliedConfigAnnotation]; !ok { + fmt.Fprintf(o.ErrOut, warningNoLastAppliedConfigAnnotation, info.ObjectName(), corev1.LastAppliedConfigAnnotation, o.cmdBaseName) + } + + patcher, err := newPatcher(o, info, helper) + if err != nil { + return err + } + patchBytes, patchedObject, err := patcher.Patch(info.Object, modified, info.Source, info.Namespace, info.Name, o.ErrOut) + if err != nil { + return cmdutil.AddSourceToErr(fmt.Sprintf("applying patch:\n%s\nto:\n%v\nfor:", patchBytes, info), info.Source, err) + } + + info.Refresh(patchedObject, true) + + WarnIfDeleting(info.Object, o.ErrOut) + + if string(patchBytes) == "{}" && !o.shouldPrintObject() { + printer, err := o.ToPrinter("unchanged") + if err != nil { + return err + } + if err = printer.PrintObj(info.Object, o.Out); err != nil { + return err + } + return nil + } + } + + if o.shouldPrintObject() { + return nil + } + + printer, err := o.ToPrinter("configured") + if err != nil { + return err + } + if err = printer.PrintObj(info.Object, o.Out); err != nil { + return err + } + + return nil +} + +func pruneNullsFromMap(data map[string]interface{}) { + for k, v := range data { + if v == nil { + delete(data, k) + } else { + pruneNulls(v) + } + } +} +func pruneNullsFromSlice(data []interface{}) { + for _, v := range data { + pruneNulls(v) + } +} +func pruneNulls(v interface{}) { + switch v := v.(type) { + case map[string]interface{}: + pruneNullsFromMap(v) + case []interface{}: + pruneNullsFromSlice(v) + } +} + +// Saves the last-applied-configuration annotation in a separate SSA field manager +// to prevent it from being dropped by users who have transitioned to SSA. +// +// If this operation is not performed, then the last-applied-configuration annotation +// would be removed from the object upon the first SSA usage. We want to keep it +// around for a few releases since it is required to downgrade to +// SSA per [1] and [2]. This code should be removed once the annotation is +// deprecated. +// +// - [1] https://kubernetes.io/docs/reference/using-api/server-side-apply/#downgrading-from-server-side-apply-to-client-side-apply +// - [2] https://github.com/kubernetes/kubernetes/pull/90187 +// +// If the annotation is not already present, or if it is already managed by the +// separate SSA fieldmanager, this is a no-op. +func (o *ApplyOptions) saveLastApplyAnnotationIfNecessary( + helper *resource.Helper, + info *resource.Info, +) error { + if o.FieldManager != fieldManagerServerSideApply { + // There is no point in preserving the annotation if the field manager + // will not remain default. 
This is because the server will not keep + // the annotation up to date. + return nil + } + + // Send an apply patch with the last-applied-annotation + // so that it is not orphaned by SSA in the following patch: + accessor, err := meta.Accessor(info.Object) + if err != nil { + return err + } + + // Get the current annotations from the object. + annots := accessor.GetAnnotations() + if annots == nil { + annots = map[string]string{} + } + + fieldManager := fieldManagerLastAppliedAnnotation + originalAnnotation, hasAnnotation := annots[corev1.LastAppliedConfigAnnotation] + + // If the annotation does not already exist, we do not do anything + if !hasAnnotation { + return nil + } + + // If there is already an SSA field manager which owns the field, then there + // is nothing to do here. + if owners := csaupgrade.FindFieldsOwners( + accessor.GetManagedFields(), + metav1.ManagedFieldsOperationApply, + lastAppliedAnnotationFieldPath, + ); len(owners) > 0 { + return nil + } + + justAnnotation := &unstructured.Unstructured{} + justAnnotation.SetGroupVersionKind(info.Mapping.GroupVersionKind) + justAnnotation.SetName(accessor.GetName()) + justAnnotation.SetNamespace(accessor.GetNamespace()) + justAnnotation.SetAnnotations(map[string]string{ + corev1.LastAppliedConfigAnnotation: originalAnnotation, + }) + + modified, err := runtime.Encode(unstructured.UnstructuredJSONScheme, justAnnotation) + if err != nil { + return nil + } + + helperCopy := *helper + newObj, err := helperCopy.WithFieldManager(fieldManager).Patch( + info.Namespace, + info.Name, + types.ApplyPatchType, + modified, + nil, + ) + + if err != nil { + return err + } + + return info.Refresh(newObj, false) +} + +// Check if the returned object needs to have its kubectl-client-side-apply +// managed fields migrated server-side-apply. +// +// field ownership metadata is stored in three places: +// - server-side managed fields +// - client-side managed fields +// - and the last_applied_configuration annotation. +// +// The migration merges the client-side-managed fields into the +// server-side-managed fields, leaving the last_applied_configuration +// annotation in place. Server will keep the annotation up to date +// after every server-side-apply where the following conditions are ment: +// +// 1. field manager is 'kubectl' +// 2. annotation already exists +func (o *ApplyOptions) migrateToSSAIfNecessary( + helper *resource.Helper, + info *resource.Info, +) (migrated bool, err error) { + accessor, err := meta.Accessor(info.Object) + if err != nil { + return false, err + } + + // To determine which field managers were used by kubectl for client-side-apply + // we search for a manager used in `Update` operations which owns the + // last-applied-annotation. + // + // This is the last client-side-apply manager which changed the field. + // + // There may be multiple owners if multiple managers wrote the same exact + // configuration. In this case there are multiple owners, we want to migrate + // them all. 
+ csaManagers := csaupgrade.FindFieldsOwners( + accessor.GetManagedFields(), + metav1.ManagedFieldsOperationUpdate, + lastAppliedAnnotationFieldPath) + + managerNames := sets.New[string]() + for _, entry := range csaManagers { + managerNames.Insert(entry.Manager) + } + + // Re-attempt patch as many times as it is conflicting due to ResourceVersion + // test failing + for i := 0; i < maxPatchRetry; i++ { + var patchData []byte + var obj runtime.Object + + patchData, err = csaupgrade.UpgradeManagedFieldsPatch( + info.Object, managerNames, o.FieldManager) + + if err != nil { + // If patch generation failed there was likely a bug. + return false, err + } else if patchData == nil { + // nil patch data means nothing to do - object is already migrated + return false, nil + } + + // Send the patch to upgrade the managed fields if it is non-nil + obj, err = helper.Patch( + info.Namespace, + info.Name, + types.JSONPatchType, + patchData, + nil, + ) + + if err == nil { + // Stop retrying upon success. + info.Refresh(obj, false) + return true, nil + } else if !errors.IsConflict(err) { + // Only retry if there was a conflict + return false, err + } + + // Refresh the object for next iteration + err = info.Get() + if err != nil { + // If there was an error fetching, return error + return false, err + } + } + + // Reaching this point with non-nil error means there was a conflict and + // max retries was hit + // Return the last error witnessed (which will be a conflict) + return false, err +} + +func (o *ApplyOptions) shouldPrintObject() bool { + // Print object only if output format other than "name" is specified + shouldPrint := false + output := *o.PrintFlags.OutputFormat + shortOutput := output == "name" + if len(output) > 0 && !shortOutput { + shouldPrint = true + } + return shouldPrint +} + +func (o *ApplyOptions) printObjects() error { + + if !o.shouldPrintObject() { + return nil + } + + infos, err := o.GetObjects() + if err != nil { + return err + } + + if len(infos) > 0 { + printer, err := o.ToPrinter("") + if err != nil { + return err + } + + objToPrint := infos[0].Object + if len(infos) > 1 { + objs := []runtime.Object{} + for _, info := range infos { + objs = append(objs, info.Object) + } + list := &corev1.List{ + TypeMeta: metav1.TypeMeta{ + Kind: "List", + APIVersion: "v1", + }, + ListMeta: metav1.ListMeta{}, + } + if err := meta.SetList(list, objs); err != nil { + return err + } + + objToPrint = list + } + if err := printer.PrintObj(objToPrint, o.Out); err != nil { + return err + } + } + + return nil +} + +// MarkNamespaceVisited keeps track of which namespaces the applied +// objects belong to. Used for pruning. +func (o *ApplyOptions) MarkNamespaceVisited(info *resource.Info) { + if info.Namespaced() { + o.VisitedNamespaces.Insert(info.Namespace) + } +} + +// MarkObjectVisited keeps track of UIDs of the applied +// objects. Used for pruning. +func (o *ApplyOptions) MarkObjectVisited(info *resource.Info) error { + metadata, err := meta.Accessor(info.Object) + if err != nil { + return err + } + o.VisitedUids.Insert(metadata.GetUID()) + + return nil +} + +// PrintAndPrunePostProcessor returns a function which meets the PostProcessorFn +// function signature. This returned function prints all the +// objects as a list (if configured for that), and prunes the +// objects not applied. The returned function is the standard +// apply post processor. 
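A brief usage sketch of the hook fields described above, assuming a hypothetical wrapper command that already holds its objects in a `precomputedInfos` slice: the pre-processor injects them via `SetObjects`, and the standard post-processor keeps the print-and-prune behaviour.

```go
// precomputedInfos is assumed to be a []*resource.Info built by the caller.
o.PreProcessorFn = func() error {
	o.SetObjects(precomputedInfos) // GetObjects will now skip the resource builder
	return nil
}
o.PostProcessorFn = o.PrintAndPrunePostProcessor()
if err := o.Run(); err != nil {
	return err
}
```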
+func (o *ApplyOptions) PrintAndPrunePostProcessor() func() error { + + return func() error { + ctx := context.TODO() + if err := o.printObjects(); err != nil { + return err + } + + if o.Prune { + if cmdutil.ApplySet.IsEnabled() && o.ApplySet != nil { + if err := o.ApplySet.Prune(ctx, o); err != nil { + // Do not update the ApplySet. If pruning failed, we want to keep the superset + // of the previous and current resources in the ApplySet, so that the pruning + // step of the next apply will be able to clean up the set correctly. + return err + } + } else { + p := newPruner(o) + return p.pruneAll(o) + } + } + + return nil + } +} + +const ( + // FieldManagerClientSideApply is the default client-side apply field manager. + // + // The default field manager is not `kubectl-apply` to distinguish from + // server-side apply. + FieldManagerClientSideApply = "kubectl-client-side-apply" + // The default server-side apply field manager is `kubectl` + // instead of a field manager like `kubectl-server-side-apply` + // for backward compatibility to not conflict with old versions + // of kubectl server-side apply where `kubectl` has already been the field manager. + fieldManagerServerSideApply = "kubectl" + + fieldManagerLastAppliedAnnotation = "kubectl-last-applied" +) + +var ( + lastAppliedAnnotationFieldPath = fieldpath.NewSet( + fieldpath.MakePathOrDie( + "metadata", "annotations", + corev1.LastAppliedConfigAnnotation), + ) +) + +// GetApplyFieldManagerFlag gets the field manager for kubectl apply +// if it is not set. +// +// The default field manager is not `kubectl-apply` to distinguish between +// client-side and server-side apply. +func GetApplyFieldManagerFlag(cmd *cobra.Command, serverSide bool) string { + // The field manager flag was set + if cmd.Flag("field-manager").Changed { + return cmdutil.GetFlagString(cmd, "field-manager") + } + + if serverSide { + return fieldManagerServerSideApply + } + + return FieldManagerClientSideApply +} + +// WarnIfDeleting prints a warning if a resource is being deleted +func WarnIfDeleting(obj runtime.Object, stderr io.Writer) { + metadata, _ := meta.Accessor(obj) + if metadata != nil && metadata.GetDeletionTimestamp() != nil { + // just warn the user about the conflict + fmt.Fprintf(stderr, warningChangesOnDeletingResource, metadata.GetName()) + } +} diff --git a/pkg/kubectl/apply/apply_edit_last_applied.go b/pkg/kubectl/apply/apply_edit_last_applied.go new file mode 100644 index 0000000..ec3fcad --- /dev/null +++ b/pkg/kubectl/apply/apply_edit_last_applied.go @@ -0,0 +1,89 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apply + +import ( + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/genericiooptions" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/cmd/util/editor" + "k8s.io/kubectl/pkg/util/completion" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + applyEditLastAppliedLong = templates.LongDesc(i18n.T(` + Edit the latest last-applied-configuration annotations of resources from the default editor. + + The edit-last-applied command allows you to directly edit any API resource you can retrieve via the + command-line tools. It will open the editor defined by your KUBE_EDITOR, or EDITOR + environment variables, or fall back to 'vi' for Linux or 'notepad' for Windows. + You can edit multiple objects, although changes are applied one at a time. The command + accepts file names as well as command-line arguments, although the files you point to must + be previously saved versions of resources. + + The default format is YAML. To edit in JSON, specify "-o json". + + The flag --windows-line-endings can be used to force Windows line endings, + otherwise the default for your operating system will be used. + + In the event an error occurs while updating, a temporary file will be created on disk + that contains your unapplied changes. The most common error when updating a resource + is another editor changing the resource on the server. When this occurs, you will have + to apply your changes to the newer version of the resource, or update your temporary + saved copy to include the latest resource version.`)) + + applyEditLastAppliedExample = templates.Examples(` + # Edit the last-applied-configuration annotations by type/name in YAML + kubectl apply edit-last-applied deployment/nginx + + # Edit the last-applied-configuration annotations by file in JSON + kubectl apply edit-last-applied -f deploy.yaml -o json`) +) + +// NewCmdApplyEditLastApplied created the cobra CLI command for the `apply edit-last-applied` command. +func NewCmdApplyEditLastApplied(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := editor.NewEditOptions(editor.ApplyEditMode, ioStreams) + + cmd := &cobra.Command{ + Use: "edit-last-applied (RESOURCE/NAME | -f FILENAME)", + DisableFlagsInUseLine: true, + Short: i18n.T("Edit latest last-applied-configuration annotations of a resource/object"), + Long: applyEditLastAppliedLong, + Example: applyEditLastAppliedExample, + ValidArgsFunction: completion.ResourceTypeAndNameCompletionFunc(f), + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, args, cmd)) + cmdutil.CheckErr(o.Run()) + }, + } + + // bind flag structs + o.RecordFlags.AddFlags(cmd) + o.PrintFlags.AddFlags(cmd) + + usage := "to use to edit the resource" + cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) + cmd.Flags().BoolVar(&o.WindowsLineEndings, "windows-line-endings", o.WindowsLineEndings, + "Defaults to the line ending native to your platform.") + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, FieldManagerClientSideApply) + cmdutil.AddValidateFlags(cmd) + + return cmd +} diff --git a/pkg/kubectl/apply/apply_set_last_applied.go b/pkg/kubectl/apply/apply_set_last_applied.go new file mode 100644 index 0000000..02e54cd --- /dev/null +++ b/pkg/kubectl/apply/apply_set_last_applied.go @@ -0,0 +1,219 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apply + +import ( + "bytes" + "fmt" + + "github.com/spf13/cobra" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/cli-runtime/pkg/resource" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/cmd/util/editor" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +// SetLastAppliedOptions defines options for the `apply set-last-applied` command.` +type SetLastAppliedOptions struct { + CreateAnnotation bool + + PrintFlags *genericclioptions.PrintFlags + PrintObj printers.ResourcePrinterFunc + + FilenameOptions resource.FilenameOptions + + infoList []*resource.Info + namespace string + enforceNamespace bool + dryRunStrategy cmdutil.DryRunStrategy + shortOutput bool + output string + patchBufferList []PatchBuffer + builder *resource.Builder + unstructuredClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error) + + genericiooptions.IOStreams +} + +// PatchBuffer caches changes that are to be applied. +type PatchBuffer struct { + Patch []byte + PatchType types.PatchType +} + +var ( + applySetLastAppliedLong = templates.LongDesc(i18n.T(` + Set the latest last-applied-configuration annotations by setting it to match the contents of a file. + This results in the last-applied-configuration being updated as though 'kubectl apply -f ' was run, + without updating any other parts of the object.`)) + + applySetLastAppliedExample = templates.Examples(i18n.T(` + # Set the last-applied-configuration of a resource to match the contents of a file + kubectl apply set-last-applied -f deploy.yaml + + # Execute set-last-applied against each configuration file in a directory + kubectl apply set-last-applied -f path/ + + # Set the last-applied-configuration of a resource to match the contents of a file; will create the annotation if it does not already exist + kubectl apply set-last-applied -f deploy.yaml --create-annotation=true + `)) +) + +// NewSetLastAppliedOptions takes option arguments from a CLI stream and returns it at SetLastAppliedOptions type. 
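Conceptually, each `PatchBuffer` queued by this command carries a patch that rewrites only the last-applied-configuration annotation and leaves the rest of the live object untouched. The sketch below is purely illustrative; the real patch bytes and patch type come from `editor.GetApplyPatch` and are not reproduced here, and the merge patch type is an assumption for the sketch.

```go
// Purely illustrative payload; real patches are produced by editor.GetApplyPatch.
pb := PatchBuffer{
	PatchType: types.MergePatchType, // assumption for this sketch
	Patch: []byte(`{"metadata":{"annotations":{` +
		`"kubectl.kubernetes.io/last-applied-configuration":"<json of the local file>"}}}`),
}
_ = pb
```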
+func NewSetLastAppliedOptions(ioStreams genericiooptions.IOStreams) *SetLastAppliedOptions { + return &SetLastAppliedOptions{ + PrintFlags: genericclioptions.NewPrintFlags("configured").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +// NewCmdApplySetLastApplied creates the cobra CLI `apply` subcommand `set-last-applied`.` +func NewCmdApplySetLastApplied(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewSetLastAppliedOptions(ioStreams) + cmd := &cobra.Command{ + Use: "set-last-applied -f FILENAME", + DisableFlagsInUseLine: true, + Short: i18n.T("Set the last-applied-configuration annotation on a live object to match the contents of a file"), + Long: applySetLastAppliedLong, + Example: applySetLastAppliedExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.RunSetLastApplied()) + }, + } + + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddDryRunFlag(cmd) + cmd.Flags().BoolVar(&o.CreateAnnotation, "create-annotation", o.CreateAnnotation, "Will create 'last-applied-configuration' annotations if current objects doesn't have one") + cmdutil.AddJsonFilenameFlag(cmd.Flags(), &o.FilenameOptions.Filenames, "Filename, directory, or URL to files that contains the last-applied-configuration annotations") + + return cmd +} + +// Complete populates dry-run and output flag options. +func (o *SetLastAppliedOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { + var err error + o.dryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + o.output = cmdutil.GetFlagString(cmd, "output") + o.shortOutput = o.output == "name" + + o.namespace, o.enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + o.builder = f.NewBuilder() + o.unstructuredClientForMapping = f.UnstructuredClientForMapping + + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.dryRunStrategy) + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + o.PrintObj = printer.PrintObj + + return nil +} + +// Validate checks SetLastAppliedOptions for validity. +func (o *SetLastAppliedOptions) Validate() error { + r := o.builder. + Unstructured(). + NamespaceParam(o.namespace).DefaultNamespace(). + FilenameParam(o.enforceNamespace, &o.FilenameOptions). + Flatten(). + Do() + + err := r.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + patchBuf, diffBuf, patchType, err := editor.GetApplyPatch(info.Object.(runtime.Unstructured)) + if err != nil { + return err + } + + // Verify the object exists in the cluster before trying to patch it. 
+ if err := info.Get(); err != nil { + if errors.IsNotFound(err) { + return err + } + return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err) + } + originalBuf, err := util.GetOriginalConfiguration(info.Object) + if err != nil { + return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err) + } + if originalBuf == nil && !o.CreateAnnotation { + return fmt.Errorf("no last-applied-configuration annotation found on resource: %s, to create the annotation, run the command with --create-annotation", info.Name) + } + + //only add to PatchBufferList when changed + if !bytes.Equal(cmdutil.StripComments(originalBuf), cmdutil.StripComments(diffBuf)) { + p := PatchBuffer{Patch: patchBuf, PatchType: patchType} + o.patchBufferList = append(o.patchBufferList, p) + o.infoList = append(o.infoList, info) + } else { + fmt.Fprintf(o.Out, "set-last-applied %s: no changes required.\n", info.Name) + } + + return nil + }) + return err +} + +// RunSetLastApplied executes the `set-last-applied` command according to SetLastAppliedOptions. +func (o *SetLastAppliedOptions) RunSetLastApplied() error { + for i, patch := range o.patchBufferList { + info := o.infoList[i] + finalObj := info.Object + + if o.dryRunStrategy != cmdutil.DryRunClient { + mapping := info.ResourceMapping() + client, err := o.unstructuredClientForMapping(mapping) + if err != nil { + return err + } + helper := resource. + NewHelper(client, mapping). + DryRun(o.dryRunStrategy == cmdutil.DryRunServer) + finalObj, err = helper.Patch(info.Namespace, info.Name, patch.PatchType, patch.Patch, nil) + if err != nil { + return err + } + } + if err := o.PrintObj(finalObj, o.Out); err != nil { + return err + } + } + return nil +} diff --git a/pkg/kubectl/apply/apply_view_last_applied.go b/pkg/kubectl/apply/apply_view_last_applied.go new file mode 100644 index 0000000..bd2f6a8 --- /dev/null +++ b/pkg/kubectl/apply/apply_view_last_applied.go @@ -0,0 +1,174 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apply + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/resource" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/completion" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" + "sigs.k8s.io/yaml" +) + +// ViewLastAppliedOptions defines options for the `apply view-last-applied` command.` +type ViewLastAppliedOptions struct { + FilenameOptions resource.FilenameOptions + Selector string + LastAppliedConfigurationList []string + OutputFormat string + All bool + Factory cmdutil.Factory + + genericiooptions.IOStreams +} + +var ( + applyViewLastAppliedLong = templates.LongDesc(i18n.T(` + View the latest last-applied-configuration annotations by type/name or file. 
+ + The default output will be printed to stdout in YAML format. You can use the -o option + to change the output format.`)) + + applyViewLastAppliedExample = templates.Examples(i18n.T(` + # View the last-applied-configuration annotations by type/name in YAML + kubectl apply view-last-applied deployment/nginx + + # View the last-applied-configuration annotations by file in JSON + kubectl apply view-last-applied -f deploy.yaml -o json`)) +) + +// NewViewLastAppliedOptions takes option arguments from a CLI stream and returns it at ViewLastAppliedOptions type. +func NewViewLastAppliedOptions(ioStreams genericiooptions.IOStreams) *ViewLastAppliedOptions { + return &ViewLastAppliedOptions{ + OutputFormat: "yaml", + + IOStreams: ioStreams, + } +} + +// NewCmdApplyViewLastApplied creates the cobra CLI `apply` subcommand `view-last-applied`.` +func NewCmdApplyViewLastApplied(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + options := NewViewLastAppliedOptions(ioStreams) + + cmd := &cobra.Command{ + Use: "view-last-applied (TYPE [NAME | -l label] | TYPE/NAME | -f FILENAME)", + DisableFlagsInUseLine: true, + Short: i18n.T("View the latest last-applied-configuration annotations of a resource/object"), + Long: applyViewLastAppliedLong, + Example: applyViewLastAppliedExample, + ValidArgsFunction: completion.ResourceTypeAndNameCompletionFunc(f), + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(options.Complete(cmd, f, args)) + cmdutil.CheckErr(options.Validate()) + cmdutil.CheckErr(options.RunApplyViewLastApplied(cmd)) + }, + } + + cmd.Flags().StringVarP(&options.OutputFormat, "output", "o", options.OutputFormat, `Output format. Must be one of (yaml, json)`) + cmd.Flags().BoolVar(&options.All, "all", options.All, "Select all resources in the namespace of the specified resource types") + usage := "that contains the last-applied-configuration annotations" + cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, usage) + cmdutil.AddLabelSelectorFlagVar(cmd, &options.Selector) + + return cmd +} + +// Complete checks an object for last-applied-configuration annotations. +func (o *ViewLastAppliedOptions) Complete(cmd *cobra.Command, f cmdutil.Factory, args []string) error { + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + + r := f.NewBuilder(). + Unstructured(). + NamespaceParam(cmdNamespace).DefaultNamespace(). + FilenameParam(enforceNamespace, &o.FilenameOptions). + ResourceTypeOrNameArgs(enforceNamespace, args...). + SelectAllParam(o.All). + LabelSelectorParam(o.Selector). + Latest(). + Flatten(). + Do() + err = r.Err() + if err != nil { + return err + } + + err = r.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + + configString, err := util.GetOriginalConfiguration(info.Object) + if err != nil { + return err + } + if configString == nil { + return cmdutil.AddSourceToErr(fmt.Sprintf("no last-applied-configuration annotation found on resource: %s\n", info.Name), info.Source, err) + } + o.LastAppliedConfigurationList = append(o.LastAppliedConfigurationList, string(configString)) + return nil + }) + + if err != nil { + return err + } + + return nil +} + +// Validate checks ViewLastAppliedOptions for validity. +func (o *ViewLastAppliedOptions) Validate() error { + return nil +} + +// RunApplyViewLastApplied executes the `view-last-applied` command according to ViewLastAppliedOptions. 
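As a small illustration of the output path in the function below: the stored last-applied configuration is JSON, and for the default `yaml` format it is converted with `sigs.k8s.io/yaml` before printing. The ConfigMap payload here is hypothetical.

```go
jsonCfg := []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"demo"}}`)
yamlCfg, err := yaml.JSONToYAML(jsonCfg)
if err != nil {
	return err
}
fmt.Fprint(o.Out, string(yamlCfg))
// apiVersion: v1
// kind: ConfigMap
// metadata:
//   name: demo
```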
+func (o *ViewLastAppliedOptions) RunApplyViewLastApplied(cmd *cobra.Command) error { + for _, str := range o.LastAppliedConfigurationList { + switch o.OutputFormat { + case "json": + jsonBuffer := &bytes.Buffer{} + err := json.Indent(jsonBuffer, []byte(str), "", " ") + if err != nil { + return err + } + fmt.Fprint(o.Out, string(jsonBuffer.Bytes())) + case "yaml": + yamlOutput, err := yaml.JSONToYAML([]byte(str)) + if err != nil { + return err + } + fmt.Fprint(o.Out, string(yamlOutput)) + default: + return cmdutil.UsageErrorf( + cmd, + "Unexpected -o output mode: %s, the flag 'output' must be one of yaml|json", + o.OutputFormat) + } + } + + return nil +} diff --git a/pkg/kubectl/apply/applyset.go b/pkg/kubectl/apply/applyset.go new file mode 100644 index 0000000..4fd6dd8 --- /dev/null +++ b/pkg/kubectl/apply/applyset.go @@ -0,0 +1,607 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apply + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/dynamic" + "k8s.io/klog/v2" + cmdutil "k8s.io/kubectl/pkg/cmd/util" +) + +// Label and annotation keys from the ApplySet specification. +// https://git.k8s.io/enhancements/keps/sig-cli/3659-kubectl-apply-prune#design-details-applyset-specification +const ( + // ApplySetToolingAnnotation is the key of the label that indicates which tool is used to manage this ApplySet. + // Tooling should refuse to mutate ApplySets belonging to other tools. + // The value must be in the format /. + // Example value: "kubectl/v1.27" or "helm/v3" or "kpt/v1.0.0" + ApplySetToolingAnnotation = "applyset.kubernetes.io/tooling" + + // ApplySetAdditionalNamespacesAnnotation annotation extends the scope of the ApplySet beyond the parent + // object's own namespace (if any) to include the listed namespaces. The value is a comma-separated + // list of the names of namespaces other than the parent's namespace in which objects are found + // Example value: "kube-system,ns1,ns2". + ApplySetAdditionalNamespacesAnnotation = "applyset.kubernetes.io/additional-namespaces" + + // Deprecated: ApplySetGRsAnnotation is a list of group-resources used to optimize listing of ApplySet member objects. + // It is optional in the ApplySet specification, as tools can perform discovery or use a different optimization. + // However, it is currently required in kubectl. + // When present, the value of this annotation must be a comma separated list of the group-resources, + // in the fully-qualified name format, i.e. .. 
+ // Example value: "certificates.cert-manager.io,configmaps,deployments.apps,secrets,services" + // Deprecated and replaced by ApplySetGKsAnnotation, support for this can be removed in applyset beta or GA. + DeprecatedApplySetGRsAnnotation = "applyset.kubernetes.io/contains-group-resources" + + // ApplySetGKsAnnotation is a list of group-kinds used to optimize listing of ApplySet member objects. + // It is optional in the ApplySet specification, as tools can perform discovery or use a different optimization. + // However, it is currently required in kubectl. + // When present, the value of this annotation must be a comma separated list of the group-kinds, + // in the fully-qualified name format, i.e. .. + // Example value: "Certificate.cert-manager.io,ConfigMap,deployments.apps,Secret,Service" + ApplySetGKsAnnotation = "applyset.kubernetes.io/contains-group-kinds" + + // ApplySetParentIDLabel is the key of the label that makes object an ApplySet parent object. + // Its value MUST use the format specified in V1ApplySetIdFormat below + ApplySetParentIDLabel = "applyset.kubernetes.io/id" + + // V1ApplySetIdFormat is the format required for the value of ApplySetParentIDLabel (and ApplysetPartOfLabel). + // The %s segment is the unique ID of the object itself, which MUST be the base64 encoding + // (using the URL safe encoding of RFC4648) of the hash of the GKNN of the object it is on, in the form: + // base64(sha256(...)). + V1ApplySetIdFormat = "applyset-%s-v1" + + // ApplysetPartOfLabel is the key of the label which indicates that the object is a member of an ApplySet. + // The value of the label MUST match the value of ApplySetParentIDLabel on the parent object. + ApplysetPartOfLabel = "applyset.kubernetes.io/part-of" + + // ApplysetParentCRDLabel is the key of the label that can be set on a CRD to identify + // the custom resource type it defines (not the CRD itself) as an allowed parent for an ApplySet. + ApplysetParentCRDLabel = "applyset.kubernetes.io/is-parent-type" +) + +var defaultApplySetParentGVR = schema.GroupVersionResource{Version: "v1", Resource: "secrets"} + +// ApplySet tracks the information about an applyset apply/prune +type ApplySet struct { + // parentRef is a reference to the parent object that is used to track the applyset. + parentRef *ApplySetParentRef + + // toolingID is the value to be used and validated in the applyset.kubernetes.io/tooling annotation. + toolingID ApplySetTooling + + // currentResources is the set of resources that are part of the sever-side set as of when the current operation started. + currentResources map[schema.GroupKind]*kindInfo + + // currentNamespaces is the set of namespaces that contain objects in this applyset as of when the current operation started. + currentNamespaces sets.Set[string] + + // updatedResources is the set of resources that will be part of the set as of when the current operation completes. + updatedResources map[schema.GroupKind]*kindInfo + + // updatedNamespaces is the set of namespaces that will contain objects in this applyset as of when the current operation completes. 
+ updatedNamespaces sets.Set[string] + + restMapper meta.RESTMapper + + // client is a client specific to the ApplySet parent object's type + client resource.RESTClient +} + +var builtinApplySetParentGVRs = sets.New[schema.GroupVersionResource]( + defaultApplySetParentGVR, + schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}, +) + +// ApplySetParentRef stores object and type meta for the parent object that is used to track the applyset. +type ApplySetParentRef struct { + Name string + Namespace string + *meta.RESTMapping +} + +func (p ApplySetParentRef) IsNamespaced() bool { + return p.Scope.Name() == meta.RESTScopeNameNamespace +} + +// String returns the string representation of the parent object using the same format +// that we expect to receive in the --applyset flag on the CLI. +func (p ApplySetParentRef) String() string { + return fmt.Sprintf("%s.%s/%s", p.Resource.Resource, p.Resource.Group, p.Name) +} + +type ApplySetTooling struct { + Name string + Version string +} + +func (t ApplySetTooling) String() string { + return fmt.Sprintf("%s/%s", t.Name, t.Version) +} + +// NewApplySet creates a new ApplySet object tracked by the given parent object. +func NewApplySet(parent *ApplySetParentRef, tooling ApplySetTooling, mapper meta.RESTMapper, client resource.RESTClient) *ApplySet { + return &ApplySet{ + currentResources: make(map[schema.GroupKind]*kindInfo), + currentNamespaces: make(sets.Set[string]), + updatedResources: make(map[schema.GroupKind]*kindInfo), + updatedNamespaces: make(sets.Set[string]), + parentRef: parent, + toolingID: tooling, + restMapper: mapper, + client: client, + } +} + +const applySetIDPartDelimiter = "." + +// ID is the label value that we are using to identify this applyset. +// Format: base64(sha256(...)), using the URL safe encoding of RFC4648. + +func (a ApplySet) ID() string { + unencoded := strings.Join([]string{a.parentRef.Name, a.parentRef.Namespace, a.parentRef.GroupVersionKind.Kind, a.parentRef.GroupVersionKind.Group}, applySetIDPartDelimiter) + hashed := sha256.Sum256([]byte(unencoded)) + b64 := base64.RawURLEncoding.EncodeToString(hashed[:]) + // Label values must start and end with alphanumeric values, so add a known-safe prefix and suffix. + return fmt.Sprintf(V1ApplySetIdFormat, b64) +} + +// Validate imposes restrictions on the parent object that is used to track the applyset. +func (a ApplySet) Validate(ctx context.Context, client dynamic.Interface) error { + var errors []error + if a.parentRef.IsNamespaced() && a.parentRef.Namespace == "" { + errors = append(errors, fmt.Errorf("namespace is required to use namespace-scoped ApplySet")) + } + if !builtinApplySetParentGVRs.Has(a.parentRef.Resource) { + // Determine which custom resource types are allowed as ApplySet parents. + // Optimization: Since this makes requests, we only do this if they aren't using a default type. 
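+		// A custom-resource parent (hypothetical example: a "widgets.example.com" resource) is only
+		// accepted if its CRD carries the applyset.kubernetes.io/is-parent-type label, as checked below.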
+ permittedCRParents, err := a.getAllowedCustomResourceParents(ctx, client) + if err != nil { + errors = append(errors, fmt.Errorf("identifying allowed custom resource parent types: %w", err)) + } + parentRefResourceIgnoreVersion := a.parentRef.Resource.GroupResource().WithVersion("") + if !permittedCRParents.Has(parentRefResourceIgnoreVersion) { + errors = append(errors, fmt.Errorf("resource %q is not permitted as an ApplySet parent", a.parentRef.Resource)) + } + } + return utilerrors.NewAggregate(errors) +} + +func (a *ApplySet) labelForCustomParentCRDs() *metav1.LabelSelector { + return &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: ApplysetParentCRDLabel, + Operator: metav1.LabelSelectorOpExists, + }}, + } +} + +func (a *ApplySet) getAllowedCustomResourceParents(ctx context.Context, client dynamic.Interface) (sets.Set[schema.GroupVersionResource], error) { + opts := metav1.ListOptions{ + LabelSelector: metav1.FormatLabelSelector(a.labelForCustomParentCRDs()), + } + list, err := client.Resource(schema.GroupVersionResource{ + Group: "apiextensions.k8s.io", + Version: "v1", + Resource: "customresourcedefinitions", + }).List(ctx, opts) + if err != nil { + return nil, err + } + set := sets.New[schema.GroupVersionResource]() + for i := range list.Items { + // Custom resources must be named `.` + // and are served under `/apis///.../` + gr := schema.ParseGroupResource(list.Items[i].GetName()) + set.Insert(gr.WithVersion("")) + } + return set, nil +} + +func (a *ApplySet) LabelsForMember() map[string]string { + return map[string]string{ + ApplysetPartOfLabel: a.ID(), + } +} + +// addLabels sets our tracking labels on each object; this should be called as part of loading the objects. +func (a *ApplySet) AddLabels(objects ...*resource.Info) error { + applysetLabels := a.LabelsForMember() + for _, obj := range objects { + accessor, err := meta.Accessor(obj.Object) + if err != nil { + return fmt.Errorf("getting accessor: %w", err) + } + labels := accessor.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + for k, v := range applysetLabels { + if _, found := labels[k]; found { + return fmt.Errorf("ApplySet label %q already set in input data", k) + } + labels[k] = v + } + accessor.SetLabels(labels) + } + + return nil +} + +func (a *ApplySet) fetchParent() error { + helper := resource.NewHelper(a.client, a.parentRef.RESTMapping) + obj, err := helper.Get(a.parentRef.Namespace, a.parentRef.Name) + if errors.IsNotFound(err) { + if !builtinApplySetParentGVRs.Has(a.parentRef.Resource) { + return fmt.Errorf("custom resource ApplySet parents cannot be created automatically") + } + return nil + } else if err != nil { + return fmt.Errorf("failed to fetch ApplySet parent object %q: %w", a.parentRef, err) + } else if obj == nil { + return fmt.Errorf("failed to fetch ApplySet parent object %q", a.parentRef) + } + + labels, annotations, err := getLabelsAndAnnotations(obj) + if err != nil { + return fmt.Errorf("getting metadata from parent object %q: %w", a.parentRef, err) + } + + toolAnnotation, hasToolAnno := annotations[ApplySetToolingAnnotation] + if !hasToolAnno { + return fmt.Errorf("ApplySet parent object %q already exists and is missing required annotation %q", a.parentRef, ApplySetToolingAnnotation) + } + if managedBy := toolingBaseName(toolAnnotation); managedBy != a.toolingID.Name { + return fmt.Errorf("ApplySet parent object %q already exists and is managed by tooling %q instead of %q", a.parentRef, managedBy, a.toolingID.Name) + } + + 
idLabel, hasIDLabel := labels[ApplySetParentIDLabel] + if !hasIDLabel { + return fmt.Errorf("ApplySet parent object %q exists and does not have required label %s", a.parentRef, ApplySetParentIDLabel) + } + if idLabel != a.ID() { + return fmt.Errorf("ApplySet parent object %q exists and has incorrect value for label %q (got: %s, want: %s)", a.parentRef, ApplySetParentIDLabel, idLabel, a.ID()) + } + + if a.currentResources, err = parseKindAnnotation(annotations, a.restMapper); err != nil { + // TODO: handle GVRs for now-deleted CRDs + return fmt.Errorf("parsing ApplySet annotation on %q: %w", a.parentRef, err) + } + a.currentNamespaces = parseNamespacesAnnotation(annotations) + if a.parentRef.IsNamespaced() { + a.currentNamespaces.Insert(a.parentRef.Namespace) + } + return nil +} +func (a *ApplySet) LabelSelectorForMembers() string { + return metav1.FormatLabelSelector(&metav1.LabelSelector{ + MatchLabels: a.LabelsForMember(), + }) +} + +// AllPrunableResources returns the list of all resources that should be considered for pruning. +// This is potentially a superset of the resources types that actually contain resources. +func (a *ApplySet) AllPrunableResources() []*kindInfo { + var ret []*kindInfo + for _, m := range a.currentResources { + ret = append(ret, m) + } + return ret +} + +// AllPrunableNamespaces returns the list of all namespaces that should be considered for pruning. +// This is potentially a superset of the namespaces that actually contain resources. +func (a *ApplySet) AllPrunableNamespaces() []string { + var ret []string + for ns := range a.currentNamespaces { + ret = append(ret, ns) + } + return ret +} + +func getLabelsAndAnnotations(obj runtime.Object) (map[string]string, map[string]string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return nil, nil, err + } + return accessor.GetLabels(), accessor.GetAnnotations(), nil +} + +func toolingBaseName(toolAnnotation string) string { + parts := strings.Split(toolAnnotation, "/") + if len(parts) >= 2 { + return strings.Join(parts[:len(parts)-1], "/") + } + return toolAnnotation +} + +// kindInfo holds type information about a particular resource type. +type kindInfo struct { + restMapping *meta.RESTMapping +} + +func parseKindAnnotation(annotations map[string]string, mapper meta.RESTMapper) (map[schema.GroupKind]*kindInfo, error) { + annotation, ok := annotations[ApplySetGKsAnnotation] + if !ok { + if annotations[DeprecatedApplySetGRsAnnotation] != "" { + return parseDeprecatedResourceAnnotation(annotations[DeprecatedApplySetGRsAnnotation], mapper) + } + + // The spec does not require this annotation. However, 'missing' means 'perform discovery'. + // We return an error because we do not currently support dynamic discovery in kubectl apply. + return nil, fmt.Errorf("kubectl requires the %q annotation to be set on all ApplySet parent objects", ApplySetGKsAnnotation) + } + mappings := make(map[schema.GroupKind]*kindInfo) + // Annotation present but empty means that this is currently an empty set. 
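+	// A non-empty value is a comma-separated list of group-kinds, e.g. "ConfigMap,Deployment.apps"
+	// (illustrative), each of which is resolved to a RESTMapping below.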
+ if annotation == "" { + return mappings, nil + } + for _, gkString := range strings.Split(annotation, ",") { + gk := schema.ParseGroupKind(gkString) + restMapping, err := mapper.RESTMapping(gk) + if err != nil { + return nil, fmt.Errorf("could not find mapping for kind in %q annotation: %w", ApplySetGKsAnnotation, err) + } + mappings[gk] = &kindInfo{ + restMapping: restMapping, + } + } + + return mappings, nil +} + +func parseDeprecatedResourceAnnotation(annotation string, mapper meta.RESTMapper) (map[schema.GroupKind]*kindInfo, error) { + mappings := make(map[schema.GroupKind]*kindInfo) + // Annotation present but empty means that this is currently an empty set. + if annotation == "" { + return mappings, nil + } + for _, grString := range strings.Split(annotation, ",") { + gr := schema.ParseGroupResource(grString) + gvk, err := mapper.KindFor(gr.WithVersion("")) + if err != nil { + return nil, fmt.Errorf("invalid group resource in %q annotation: %w", DeprecatedApplySetGRsAnnotation, err) + } + restMapping, err := mapper.RESTMapping(gvk.GroupKind()) + if err != nil { + return nil, fmt.Errorf("could not find kind for resource in %q annotation: %w", DeprecatedApplySetGRsAnnotation, err) + } + mappings[gvk.GroupKind()] = &kindInfo{ + restMapping: restMapping, + } + } + return mappings, nil +} + +func parseNamespacesAnnotation(annotations map[string]string) sets.Set[string] { + annotation, ok := annotations[ApplySetAdditionalNamespacesAnnotation] + if !ok { // this annotation is completely optional + return sets.Set[string]{} + } + // Don't include an empty namespace + if annotation == "" { + return sets.Set[string]{} + } + return sets.New(strings.Split(annotation, ",")...) +} + +// addResource registers the given resource and namespace as being part of the updated set of +// resources being applied by the current operation. +func (a *ApplySet) addResource(restMapping *meta.RESTMapping, namespace string) { + gk := restMapping.GroupVersionKind.GroupKind() + if _, found := a.updatedResources[gk]; !found { + a.updatedResources[gk] = &kindInfo{ + restMapping: restMapping, + } + } + if restMapping.Scope == meta.RESTScopeNamespace && namespace != "" { + a.updatedNamespaces.Insert(namespace) + } +} + +type ApplySetUpdateMode string + +var updateToLatestSet ApplySetUpdateMode = "latest" +var updateToSuperset ApplySetUpdateMode = "superset" + +func (a *ApplySet) updateParent(mode ApplySetUpdateMode, dryRun cmdutil.DryRunStrategy, validation string) error { + data, err := json.Marshal(a.buildParentPatch(mode)) + if err != nil { + return fmt.Errorf("failed to encode patch for ApplySet parent: %w", err) + } + // Note that because we are using SSA, we will remove any annotations we don't specify, + // which is how we remove the deprecated contains-group-resources annotation. + err = serverSideApplyRequest(a, data, dryRun, validation, false) + if err != nil && errors.IsConflict(err) { + // Try again with conflicts forced + klog.Warningf("WARNING: failed to update ApplySet: %s\nApplySet field manager %s should own these fields. Retrying with conflicts forced.", err.Error(), a.FieldManager()) + err = serverSideApplyRequest(a, data, dryRun, validation, true) + } + if err != nil { + return fmt.Errorf("failed to update ApplySet: %w", err) + } + return nil +} + +func serverSideApplyRequest(a *ApplySet, data []byte, dryRun cmdutil.DryRunStrategy, validation string, forceConficts bool) error { + if dryRun == cmdutil.DryRunClient { + return nil + } + helper := resource.NewHelper(a.client, a.parentRef.RESTMapping). 
+ DryRun(dryRun == cmdutil.DryRunServer). + WithFieldManager(a.FieldManager()). + WithFieldValidation(validation) + + options := metav1.PatchOptions{ + Force: &forceConficts, + } + _, err := helper.Patch( + a.parentRef.Namespace, + a.parentRef.Name, + types.ApplyPatchType, + data, + &options, + ) + return err +} + +func (a *ApplySet) buildParentPatch(mode ApplySetUpdateMode) *metav1.PartialObjectMetadata { + var newGKsAnnotation, newNsAnnotation string + switch mode { + case updateToSuperset: + // If the apply succeeded but pruning failed, the set of group resources that + // the ApplySet should track is the superset of the previous and current resources. + // This ensures that the resources that failed to be pruned are not orphaned from the set. + grSuperset := sets.KeySet(a.currentResources).Union(sets.KeySet(a.updatedResources)) + newGKsAnnotation = generateKindsAnnotation(grSuperset) + newNsAnnotation = generateNamespacesAnnotation(a.currentNamespaces.Union(a.updatedNamespaces), a.parentRef.Namespace) + case updateToLatestSet: + newGKsAnnotation = generateKindsAnnotation(sets.KeySet(a.updatedResources)) + newNsAnnotation = generateNamespacesAnnotation(a.updatedNamespaces, a.parentRef.Namespace) + } + + return &metav1.PartialObjectMetadata{ + TypeMeta: metav1.TypeMeta{ + Kind: a.parentRef.GroupVersionKind.Kind, + APIVersion: a.parentRef.GroupVersionKind.GroupVersion().String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: a.parentRef.Name, + Namespace: a.parentRef.Namespace, + Annotations: map[string]string{ + ApplySetToolingAnnotation: a.toolingID.String(), + ApplySetGKsAnnotation: newGKsAnnotation, + ApplySetAdditionalNamespacesAnnotation: newNsAnnotation, + }, + Labels: map[string]string{ + ApplySetParentIDLabel: a.ID(), + }, + }, + } +} + +func generateNamespacesAnnotation(namespaces sets.Set[string], skip string) string { + nsList := namespaces.Clone().Delete(skip).UnsortedList() + sort.Strings(nsList) + return strings.Join(nsList, ",") +} + +func generateKindsAnnotation(resources sets.Set[schema.GroupKind]) string { + var gks []string + for gk := range resources { + gks = append(gks, gk.String()) + } + sort.Strings(gks) + return strings.Join(gks, ",") +} + +func (a ApplySet) FieldManager() string { + return fmt.Sprintf("%s-applyset", a.toolingID.Name) +} + +// ParseApplySetParentRef creates a new ApplySetParentRef from a parent reference in the format [RESOURCE][.GROUP]/NAME +func ParseApplySetParentRef(parentRefStr string, mapper meta.RESTMapper) (*ApplySetParentRef, error) { + var gvr schema.GroupVersionResource + var name string + + if groupRes, nameSuffix, hasTypeInfo := strings.Cut(parentRefStr, "/"); hasTypeInfo { + name = nameSuffix + gvr = schema.ParseGroupResource(groupRes).WithVersion("") + } else { + name = parentRefStr + gvr = defaultApplySetParentGVR + } + + if name == "" { + return nil, fmt.Errorf("name cannot be blank") + } + + gvk, err := mapper.KindFor(gvr) + if err != nil { + return nil, err + } + mapping, err := mapper.RESTMapping(gvk.GroupKind()) + if err != nil { + return nil, err + } + return &ApplySetParentRef{Name: name, RESTMapping: mapping}, nil +} + +// Prune deletes any objects from the apiserver that are no longer in the applyset. 
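+// It lists the labelled members of the set, skips the UIDs visited by the current apply, deletes the
+// remainder, and finally rewrites the parent's annotations to describe the latest set of kinds and namespaces.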
+func (a *ApplySet) Prune(ctx context.Context, o *ApplyOptions) error { + printer, err := o.ToPrinter("pruned") + if err != nil { + return err + } + opt := &ApplySetDeleteOptions{ + CascadingStrategy: o.DeleteOptions.CascadingStrategy, + DryRunStrategy: o.DryRunStrategy, + GracePeriod: o.DeleteOptions.GracePeriod, + + Printer: printer, + + IOStreams: o.IOStreams, + } + + if err := a.pruneAll(ctx, o.DynamicClient, o.VisitedUids, opt); err != nil { + return err + } + + if err := a.updateParent(updateToLatestSet, o.DryRunStrategy, o.ValidationDirective); err != nil { + return fmt.Errorf("apply and prune succeeded, but ApplySet update failed: %w", err) + } + + return nil +} + +// BeforeApply should be called before applying the objects. +// It pre-updates the parent object so that it covers the resources that will be applied. +// In this way, even if we are interrupted, we will not leak objects. +func (a *ApplySet) BeforeApply(objects []*resource.Info, dryRunStrategy cmdutil.DryRunStrategy, validationDirective string) error { + if err := a.fetchParent(); err != nil { + return err + } + // Update the live parent object to the superset of the current and previous resources. + // Doing this before the actual apply and prune operations improves behavior by ensuring + // the live object contains the superset on failure. This may cause the next pruning + // operation to make a larger number of GET requests than strictly necessary, but it prevents + // object leakage from the set. The superset will automatically be reduced to the correct + // set by the next successful operation. + for _, info := range objects { + a.addResource(info.ResourceMapping(), info.Namespace) + } + if err := a.updateParent(updateToSuperset, dryRunStrategy, validationDirective); err != nil { + return err + } + return nil +} diff --git a/pkg/kubectl/apply/applyset_pruner.go b/pkg/kubectl/apply/applyset_pruner.go new file mode 100644 index 0000000..3c064af --- /dev/null +++ b/pkg/kubectl/apply/applyset_pruner.go @@ -0,0 +1,195 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apply + +import ( + "context" + "fmt" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/client-go/dynamic" + "k8s.io/klog/v2" + cmdutil "k8s.io/kubectl/pkg/cmd/util" +) + +type ApplySetDeleteOptions struct { + CascadingStrategy metav1.DeletionPropagation + DryRunStrategy cmdutil.DryRunStrategy + GracePeriod int + + Printer printers.ResourcePrinter + + IOStreams genericiooptions.IOStreams +} + +// PruneObject is an apiserver object that should be deleted as part of prune. +type PruneObject struct { + Name string + Namespace string + Mapping *meta.RESTMapping + Object runtime.Object +} + +// String returns a human-readable name of the object, for use in debug messages. 
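+// Illustrative output (hypothetical names): "Deployment.apps my-namespace/my-app" for a namespaced
+// object, or "Namespace my-namespace" for a cluster-scoped one.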
+func (p *PruneObject) String() string { + s := p.Mapping.GroupVersionKind.GroupKind().String() + + if p.Namespace != "" { + s += " " + p.Namespace + "/" + p.Name + } else { + s += " " + p.Name + } + return s +} + +// FindAllObjectsToPrune returns the list of objects that will be pruned. +// Calling this instead of Prune can be useful for dry-run / diff behaviour. +func (a *ApplySet) FindAllObjectsToPrune(ctx context.Context, dynamicClient dynamic.Interface, visitedUids sets.Set[types.UID]) ([]PruneObject, error) { + type task struct { + namespace string + restMapping *meta.RESTMapping + + err error + results []PruneObject + } + var tasks []*task + + // We run discovery in parallel, in as many goroutines as priority and fairness will allow + // (We don't expect many requests in real-world scenarios - maybe tens, unlikely to be hundreds) + for gvk, resource := range a.AllPrunableResources() { + scope := resource.restMapping.Scope + + switch scope.Name() { + case meta.RESTScopeNameNamespace: + for _, namespace := range a.AllPrunableNamespaces() { + if namespace == "" { + // Just double-check because otherwise we get cryptic error messages + return nil, fmt.Errorf("unexpectedly encountered empty namespace during prune of namespace-scoped resource %v", gvk) + } + tasks = append(tasks, &task{ + namespace: namespace, + restMapping: resource.restMapping, + }) + } + + case meta.RESTScopeNameRoot: + tasks = append(tasks, &task{ + restMapping: resource.restMapping, + }) + + default: + return nil, fmt.Errorf("unhandled scope %q", scope.Name()) + } + } + + var wg sync.WaitGroup + + for i := range tasks { + task := tasks[i] + wg.Add(1) + go func() { + defer wg.Done() + + results, err := a.findObjectsToPrune(ctx, dynamicClient, visitedUids, task.namespace, task.restMapping) + if err != nil { + task.err = fmt.Errorf("listing %v objects for pruning: %w", task.restMapping.GroupVersionKind.String(), err) + } else { + task.results = results + } + }() + } + // Wait for all the goroutines to finish + wg.Wait() + + var allObjects []PruneObject + for _, task := range tasks { + if task.err != nil { + return nil, task.err + } + allObjects = append(allObjects, task.results...) 
+ } + return allObjects, nil +} + +func (a *ApplySet) pruneAll(ctx context.Context, dynamicClient dynamic.Interface, visitedUids sets.Set[types.UID], deleteOptions *ApplySetDeleteOptions) error { + allObjects, err := a.FindAllObjectsToPrune(ctx, dynamicClient, visitedUids) + if err != nil { + return err + } + + return a.deleteObjects(ctx, dynamicClient, allObjects, deleteOptions) +} + +func (a *ApplySet) findObjectsToPrune(ctx context.Context, dynamicClient dynamic.Interface, visitedUids sets.Set[types.UID], namespace string, mapping *meta.RESTMapping) ([]PruneObject, error) { + applysetLabelSelector := a.LabelSelectorForMembers() + + opt := metav1.ListOptions{ + LabelSelector: applysetLabelSelector, + } + + klog.V(2).Infof("listing objects for pruning; namespace=%q, resource=%v", namespace, mapping.Resource) + objects, err := dynamicClient.Resource(mapping.Resource).Namespace(namespace).List(ctx, opt) + if err != nil { + return nil, err + } + + var pruneObjects []PruneObject + for i := range objects.Items { + obj := &objects.Items[i] + + uid := obj.GetUID() + if visitedUids.Has(uid) { + continue + } + name := obj.GetName() + pruneObjects = append(pruneObjects, PruneObject{ + Name: name, + Namespace: namespace, + Mapping: mapping, + Object: obj, + }) + + } + return pruneObjects, nil +} + +func (a *ApplySet) deleteObjects(ctx context.Context, dynamicClient dynamic.Interface, pruneObjects []PruneObject, opt *ApplySetDeleteOptions) error { + for i := range pruneObjects { + pruneObject := &pruneObjects[i] + + name := pruneObject.Name + namespace := pruneObject.Namespace + mapping := pruneObject.Mapping + + if opt.DryRunStrategy != cmdutil.DryRunClient { + if err := runDelete(ctx, namespace, name, mapping, dynamicClient, opt.CascadingStrategy, opt.GracePeriod, opt.DryRunStrategy == cmdutil.DryRunServer); err != nil { + return fmt.Errorf("pruning %v: %w", pruneObject.String(), err) + } + } + + opt.Printer.PrintObj(pruneObject.Object, opt.IOStreams.Out) + + } + return nil +} diff --git a/pkg/kubectl/apply/patcher.go b/pkg/kubectl/apply/patcher.go new file mode 100644 index 0000000..5f2f142 --- /dev/null +++ b/pkg/kubectl/apply/patcher.go @@ -0,0 +1,431 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apply + +import ( + "context" + "encoding/json" + "fmt" + "io" + "time" + + "github.com/pkg/errors" + + "github.com/jonboulle/clockwork" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/jsonmergepatch" + "k8s.io/apimachinery/pkg/util/mergepatch" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/openapi3" + "k8s.io/klog/v2" + "k8s.io/kube-openapi/pkg/validation/spec" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/openapi" +) + +const ( + // maxPatchRetry is the maximum number of conflicts retry for during a patch operation before returning failure + maxPatchRetry = 5 + // how many times we can retry before back off + triesBeforeBackOff = 1 + // groupVersionKindExtensionKey is the key used to lookup the + // GroupVersionKind value for an object definition from the + // definition's "extensions" map. + groupVersionKindExtensionKey = "x-kubernetes-group-version-kind" +) + +// patchRetryBackOffPeriod is the period to back off when apply patch results in error. +var patchRetryBackOffPeriod = 1 * time.Second + +var createPatchErrFormat = "creating patch with:\noriginal:\n%s\nmodified:\n%s\ncurrent:\n%s\nfor:" + +// Patcher defines options to patch OpenAPI objects. +type Patcher struct { + Mapping *meta.RESTMapping + Helper *resource.Helper + + Overwrite bool + BackOff clockwork.Clock + + Force bool + CascadingStrategy metav1.DeletionPropagation + Timeout time.Duration + GracePeriod int + + // If set, forces the patch against a specific resourceVersion + ResourceVersion *string + + // Number of retries to make if the patch fails with conflict + Retries int + + OpenAPIGetter openapi.OpenAPIResourcesGetter + OpenAPIV3Root openapi3.Root +} + +func newPatcher(o *ApplyOptions, info *resource.Info, helper *resource.Helper) (*Patcher, error) { + var openAPIGetter openapi.OpenAPIResourcesGetter + var openAPIV3Root openapi3.Root + + if o.OpenAPIPatch { + openAPIGetter = o.OpenAPIGetter + openAPIV3Root = o.OpenAPIV3Root + } + + return &Patcher{ + Mapping: info.Mapping, + Helper: helper, + Overwrite: o.Overwrite, + BackOff: clockwork.NewRealClock(), + Force: o.DeleteOptions.ForceDeletion, + CascadingStrategy: o.DeleteOptions.CascadingStrategy, + Timeout: o.DeleteOptions.Timeout, + GracePeriod: o.DeleteOptions.GracePeriod, + OpenAPIGetter: openAPIGetter, + OpenAPIV3Root: openAPIV3Root, + Retries: maxPatchRetry, + }, nil +} + +func (p *Patcher) delete(namespace, name string) error { + options := asDeleteOptions(p.CascadingStrategy, p.GracePeriod) + _, err := p.Helper.DeleteWithOptions(namespace, name, &options) + return err +} + +func (p *Patcher) patchSimple(obj runtime.Object, modified []byte, namespace, name string, errOut io.Writer) ([]byte, runtime.Object, error) { + // Serialize the current configuration of the object from the server. + current, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) + if err != nil { + return nil, nil, errors.Wrapf(err, "serializing current configuration from:\n%v\nfor:", obj) + } + + // Retrieve the original configuration of the object from the annotation. 
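+	// (i.e. the kubectl.kubernetes.io/last-applied-configuration annotation; a missing annotation
+	// simply yields an empty original configuration).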
+ original, err := util.GetOriginalConfiguration(obj) + if err != nil { + return nil, nil, errors.Wrapf(err, "retrieving original configuration from:\n%v\nfor:", obj) + } + + var patchType types.PatchType + var patch []byte + + if p.OpenAPIV3Root != nil { + gvkSupported, err := p.gvkSupportsPatchOpenAPIV3(p.Mapping.GroupVersionKind) + if err != nil { + // Realistically this error logging is not needed (not present in V2), + // but would help us in debugging if users encounter a problem + // with OpenAPI V3 not present in V2. + klog.V(5).Infof("warning: OpenAPI V3 path does not exist - group: %s, version %s, kind %s\n", + p.Mapping.GroupVersionKind.Group, p.Mapping.GroupVersionKind.Version, p.Mapping.GroupVersionKind.Kind) + } else if gvkSupported { + patch, err = p.buildStrategicMergePatchFromOpenAPIV3(original, modified, current) + if err != nil { + // Fall back to OpenAPI V2 if there is a problem + // We should remove the fallback in the future, + // but for the first release it might be beneficial + // to fall back to OpenAPI V2 while logging the error + // and seeing if we get any bug reports. + fmt.Fprintf(errOut, "warning: error calculating patch from openapi v3 spec: %v\n", err) + } else { + patchType = types.StrategicMergePatchType + } + } else { + klog.V(5).Infof("warning: OpenAPI V3 path does not support strategic merge patch - group: %s, version %s, kind %s\n", + p.Mapping.GroupVersionKind.Group, p.Mapping.GroupVersionKind.Version, p.Mapping.GroupVersionKind.Kind) + } + } + + if patch == nil && p.OpenAPIGetter != nil { + if openAPISchema, err := p.OpenAPIGetter.OpenAPISchema(); err == nil && openAPISchema != nil { + // if openapischema is used, we'll try to get required patch type for this GVK from Open API. + // if it fails or could not find any patch type, fall back to baked-in patch type determination. + if patchType, err = p.getPatchTypeFromOpenAPI(openAPISchema, p.Mapping.GroupVersionKind); err == nil && patchType == types.StrategicMergePatchType { + patch, err = p.buildStrategicMergeFromOpenAPI(openAPISchema, original, modified, current) + if err != nil { + // Warn user about problem and continue strategic merge patching using builtin types. + fmt.Fprintf(errOut, "warning: error calculating patch from openapi spec: %v\n", err) + } + } + } + } + + if patch == nil { + versionedObj, err := scheme.Scheme.New(p.Mapping.GroupVersionKind) + if err == nil { + patchType = types.StrategicMergePatchType + patch, err = p.buildStrategicMergeFromBuiltins(versionedObj, original, modified, current) + if err != nil { + return nil, nil, errors.Wrapf(err, createPatchErrFormat, original, modified, current) + } + } else { + if !runtime.IsNotRegisteredError(err) { + return nil, nil, errors.Wrapf(err, "getting instance of versioned object for %v:", p.Mapping.GroupVersionKind) + } + + patchType = types.MergePatchType + patch, err = p.buildMergePatch(original, modified, current) + if err != nil { + return nil, nil, errors.Wrapf(err, createPatchErrFormat, original, modified, current) + } + } + } + + if string(patch) == "{}" { + return patch, obj, nil + } + + if p.ResourceVersion != nil { + patch, err = addResourceVersion(patch, *p.ResourceVersion) + if err != nil { + return nil, nil, errors.Wrap(err, "Failed to insert resourceVersion in patch") + } + } + + patchedObj, err := p.Helper.Patch(namespace, name, patchType, patch, nil) + return patch, patchedObj, err +} + +// buildMergePatch builds patch according to the JSONMergePatch which is used for +// custom resource definitions. 
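+// Illustrative three-way example: original {"spec":{"a":1}}, modified {"spec":{"a":2}} and
+// current {"spec":{"a":1},"status":{"ready":true}} yield the patch {"spec":{"a":2}}; fields present only
+// in current are left untouched, while a change to apiVersion, kind or metadata.name fails the preconditions.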
+func (p *Patcher) buildMergePatch(original, modified, current []byte) ([]byte, error) { + preconditions := []mergepatch.PreconditionFunc{mergepatch.RequireKeyUnchanged("apiVersion"), + mergepatch.RequireKeyUnchanged("kind"), mergepatch.RequireMetadataKeyUnchanged("name")} + patch, err := jsonmergepatch.CreateThreeWayJSONMergePatch(original, modified, current, preconditions...) + if err != nil { + if mergepatch.IsPreconditionFailed(err) { + return nil, fmt.Errorf("%s", "At least one of apiVersion, kind and name was changed") + } + return nil, err + } + + return patch, nil +} + +// gvkSupportsPatchOpenAPIV3 checks if a particular GVK supports the patch operation. +// It returns an error if the OpenAPI V3 could not be downloaded. +func (p *Patcher) gvkSupportsPatchOpenAPIV3(gvk schema.GroupVersionKind) (bool, error) { + gvSpec, err := p.OpenAPIV3Root.GVSpec(schema.GroupVersion{ + Group: p.Mapping.GroupVersionKind.Group, + Version: p.Mapping.GroupVersionKind.Version, + }) + if err != nil { + return false, err + } + if gvSpec == nil || gvSpec.Paths == nil || gvSpec.Paths.Paths == nil { + return false, fmt.Errorf("gvk group: %s, version: %s, kind: %s does not exist for OpenAPI V3", gvk.Group, gvk.Version, gvk.Kind) + } + for _, path := range gvSpec.Paths.Paths { + if path.Patch != nil { + if gvkMatchesSingle(p.Mapping.GroupVersionKind, path.Patch.Extensions) { + if path.Patch.RequestBody == nil || path.Patch.RequestBody.Content == nil { + // GVK exists but does not support requestBody. Indication of malformed OpenAPI. + return false, nil + } + if _, ok := path.Patch.RequestBody.Content["application/strategic-merge-patch+json"]; ok { + return true, nil + } + // GVK exists but strategic-merge-patch is not supported. Likely to be a CRD or aggregated resource. 
+ return false, nil + } + } + } + return false, nil +} + +func gvkMatchesArray(targetGVK schema.GroupVersionKind, ext spec.Extensions) bool { + var gvkList []map[string]string + err := ext.GetObject(groupVersionKindExtensionKey, &gvkList) + if err != nil { + return false + } + for _, gvkMap := range gvkList { + if gvkMap["group"] == targetGVK.Group && + gvkMap["version"] == targetGVK.Version && + gvkMap["kind"] == targetGVK.Kind { + return true + } + } + return false +} + +func gvkMatchesSingle(targetGVK schema.GroupVersionKind, ext spec.Extensions) bool { + var gvkMap map[string]string + err := ext.GetObject(groupVersionKindExtensionKey, &gvkMap) + if err != nil { + return false + } + return gvkMap["group"] == targetGVK.Group && + gvkMap["version"] == targetGVK.Version && + gvkMap["kind"] == targetGVK.Kind +} + +func (p *Patcher) buildStrategicMergePatchFromOpenAPIV3(original, modified, current []byte) ([]byte, error) { + gvSpec, err := p.OpenAPIV3Root.GVSpec(schema.GroupVersion{ + Group: p.Mapping.GroupVersionKind.Group, + Version: p.Mapping.GroupVersionKind.Version, + }) + if err != nil { + return nil, err + } + if gvSpec == nil || gvSpec.Components == nil { + return nil, fmt.Errorf("OpenAPI V3 Components is nil") + } + for _, c := range gvSpec.Components.Schemas { + if !gvkMatchesArray(p.Mapping.GroupVersionKind, c.Extensions) { + continue + } + lookupPatchMeta := strategicpatch.PatchMetaFromOpenAPIV3{Schema: c, SchemaList: gvSpec.Components.Schemas} + if openapiv3Patch, err := strategicpatch.CreateThreeWayMergePatch(original, modified, current, lookupPatchMeta, p.Overwrite); err != nil { + return nil, err + } else { + return openapiv3Patch, nil + } + + } + return nil, nil +} + +// buildStrategicMergeFromOpenAPI builds patch from OpenAPI if it is enabled. +// This is used for core types which is published in openapi. +func (p *Patcher) buildStrategicMergeFromOpenAPI(openAPISchema openapi.Resources, original, modified, current []byte) ([]byte, error) { + schema := openAPISchema.LookupResource(p.Mapping.GroupVersionKind) + if schema == nil { + // Missing schema returns nil patch; also no error. + return nil, nil + } + lookupPatchMeta := strategicpatch.PatchMetaFromOpenAPI{Schema: schema} + if openapiPatch, err := strategicpatch.CreateThreeWayMergePatch(original, modified, current, lookupPatchMeta, p.Overwrite); err != nil { + return nil, err + } else { + return openapiPatch, nil + } +} + +// getPatchTypeFromOpenAPI looks up patch types supported by given GroupVersionKind in Open API. +func (p *Patcher) getPatchTypeFromOpenAPI(openAPISchema openapi.Resources, gvk schema.GroupVersionKind) (types.PatchType, error) { + if pc := openAPISchema.GetConsumes(p.Mapping.GroupVersionKind, "PATCH"); pc != nil { + for _, c := range pc { + if c == string(types.StrategicMergePatchType) { + return types.StrategicMergePatchType, nil + } + } + + return types.MergePatchType, nil + } + + return types.MergePatchType, fmt.Errorf("unable to find any patch type for %s in Open API", gvk) +} + +// buildStrategicMergeFromStruct builds patch from struct. This is used when +// openapi endpoint is not working or user disables it by setting openapi-patch flag +// to false. 
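+// Patch metadata (patchStrategy and patchMergeKey) comes from the compiled-in Go struct tags of the
+// versioned object rather than from a downloaded schema.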
+func (p *Patcher) buildStrategicMergeFromBuiltins(versionedObj runtime.Object, original, modified, current []byte) ([]byte, error) { + lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(versionedObj) + if err != nil { + return nil, err + } + patch, err := strategicpatch.CreateThreeWayMergePatch(original, modified, current, lookupPatchMeta, p.Overwrite) + if err != nil { + return nil, err + } + + return patch, nil +} + +// Patch tries to patch an OpenAPI resource. On success, returns the merge patch as well +// the final patched object. On failure, returns an error. +func (p *Patcher) Patch(current runtime.Object, modified []byte, source, namespace, name string, errOut io.Writer) ([]byte, runtime.Object, error) { + var getErr error + patchBytes, patchObject, err := p.patchSimple(current, modified, namespace, name, errOut) + if p.Retries == 0 { + p.Retries = maxPatchRetry + } + for i := 1; i <= p.Retries && apierrors.IsConflict(err); i++ { + if i > triesBeforeBackOff { + p.BackOff.Sleep(patchRetryBackOffPeriod) + } + current, getErr = p.Helper.Get(namespace, name) + if getErr != nil { + return nil, nil, getErr + } + patchBytes, patchObject, err = p.patchSimple(current, modified, namespace, name, errOut) + } + if err != nil { + if (apierrors.IsConflict(err) || apierrors.IsInvalid(err)) && p.Force { + patchBytes, patchObject, err = p.deleteAndCreate(current, modified, namespace, name) + } else { + err = cmdutil.AddSourceToErr("patching", source, err) + } + } + return patchBytes, patchObject, err +} + +func (p *Patcher) deleteAndCreate(original runtime.Object, modified []byte, namespace, name string) ([]byte, runtime.Object, error) { + if err := p.delete(namespace, name); err != nil { + return modified, nil, err + } + // TODO: use wait + if err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, p.Timeout, true, func(ctx context.Context) (bool, error) { + if _, err := p.Helper.Get(namespace, name); !apierrors.IsNotFound(err) { + return false, err + } + return true, nil + }); err != nil { + return modified, nil, err + } + versionedObject, _, err := unstructured.UnstructuredJSONScheme.Decode(modified, nil, nil) + if err != nil { + return modified, nil, err + } + createdObject, err := p.Helper.Create(namespace, true, versionedObject) + if err != nil { + // restore the original object if we fail to create the new one + // but still propagate and advertise error to user + recreated, recreateErr := p.Helper.Create(namespace, true, original) + if recreateErr != nil { + err = fmt.Errorf("An error occurred force-replacing the existing object with the newly provided one:\n\n%v.\n\nAdditionally, an error occurred attempting to restore the original object:\n\n%v", err, recreateErr) + } else { + createdObject = recreated + } + } + return modified, createdObject, err +} + +func addResourceVersion(patch []byte, rv string) ([]byte, error) { + var patchMap map[string]interface{} + err := json.Unmarshal(patch, &patchMap) + if err != nil { + return nil, err + } + u := unstructured.Unstructured{Object: patchMap} + a, err := meta.Accessor(&u) + if err != nil { + return nil, err + } + a.SetResourceVersion(rv) + + return json.Marshal(patchMap) +} diff --git a/pkg/kubectl/apply/prune.go b/pkg/kubectl/apply/prune.go new file mode 100644 index 0000000..98ac19d --- /dev/null +++ b/pkg/kubectl/apply/prune.go @@ -0,0 +1,162 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apply + +import ( + "context" + "fmt" + "io" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/client-go/dynamic" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/util/prune" +) + +type pruner struct { + mapper meta.RESTMapper + dynamicClient dynamic.Interface + + visitedUids sets.Set[types.UID] + visitedNamespaces sets.Set[string] + labelSelector string + fieldSelector string + + cascadingStrategy metav1.DeletionPropagation + dryRunStrategy cmdutil.DryRunStrategy + gracePeriod int + + toPrinter func(string) (printers.ResourcePrinter, error) + + out io.Writer +} + +func newPruner(o *ApplyOptions) pruner { + return pruner{ + mapper: o.Mapper, + dynamicClient: o.DynamicClient, + + labelSelector: o.Selector, + visitedUids: o.VisitedUids, + visitedNamespaces: o.VisitedNamespaces, + + cascadingStrategy: o.DeleteOptions.CascadingStrategy, + dryRunStrategy: o.DryRunStrategy, + gracePeriod: o.DeleteOptions.GracePeriod, + + toPrinter: o.ToPrinter, + + out: o.Out, + } +} + +func (p *pruner) pruneAll(o *ApplyOptions) error { + + namespacedRESTMappings, nonNamespacedRESTMappings, err := prune.GetRESTMappings(o.Mapper, o.PruneResources, o.Namespace != "") + if err != nil { + return fmt.Errorf("error retrieving RESTMappings to prune: %v", err) + } + + for n := range p.visitedNamespaces { + for _, m := range namespacedRESTMappings { + if err := p.prune(n, m); err != nil { + return fmt.Errorf("error pruning namespaced object %v: %v", m.GroupVersionKind, err) + } + } + } + + for _, m := range nonNamespacedRESTMappings { + if err := p.prune(metav1.NamespaceNone, m); err != nil { + return fmt.Errorf("error pruning nonNamespaced object %v: %v", m.GroupVersionKind, err) + } + } + + return nil +} + +func (p *pruner) prune(namespace string, mapping *meta.RESTMapping) error { + objList, err := p.dynamicClient.Resource(mapping.Resource). + Namespace(namespace). 
+ List(context.TODO(), metav1.ListOptions{ + LabelSelector: p.labelSelector, + FieldSelector: p.fieldSelector, + }) + if err != nil { + return err + } + + objs, err := meta.ExtractList(objList) + if err != nil { + return err + } + + for _, obj := range objs { + metadata, err := meta.Accessor(obj) + if err != nil { + return err + } + annots := metadata.GetAnnotations() + if _, ok := annots[corev1.LastAppliedConfigAnnotation]; !ok { + // don't prune resources not created with apply + continue + } + uid := metadata.GetUID() + if p.visitedUids.Has(uid) { + continue + } + name := metadata.GetName() + if p.dryRunStrategy != cmdutil.DryRunClient { + if err := p.delete(namespace, name, mapping); err != nil { + return err + } + } + + printer, err := p.toPrinter("pruned") + if err != nil { + return err + } + printer.PrintObj(obj, p.out) + } + return nil +} + +func (p *pruner) delete(namespace, name string, mapping *meta.RESTMapping) error { + ctx := context.TODO() + return runDelete(ctx, namespace, name, mapping, p.dynamicClient, p.cascadingStrategy, p.gracePeriod, p.dryRunStrategy == cmdutil.DryRunServer) +} + +func runDelete(ctx context.Context, namespace, name string, mapping *meta.RESTMapping, c dynamic.Interface, cascadingStrategy metav1.DeletionPropagation, gracePeriod int, serverDryRun bool) error { + options := asDeleteOptions(cascadingStrategy, gracePeriod) + if serverDryRun { + options.DryRun = []string{metav1.DryRunAll} + } + return c.Resource(mapping.Resource).Namespace(namespace).Delete(ctx, name, options) +} + +func asDeleteOptions(cascadingStrategy metav1.DeletionPropagation, gracePeriod int) metav1.DeleteOptions { + options := metav1.DeleteOptions{} + if gracePeriod >= 0 { + options = *metav1.NewDeleteOptions(int64(gracePeriod)) + } + options.PropagationPolicy = &cascadingStrategy + return options +} diff --git a/pkg/kubectl/cli-runtime/resource/builder.go b/pkg/kubectl/cli-runtime/resource/builder.go new file mode 100644 index 0000000..47ec83b --- /dev/null +++ b/pkg/kubectl/cli-runtime/resource/builder.go @@ -0,0 +1,1259 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resource + +import ( + "errors" + "fmt" + "io" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + "sigs.k8s.io/kustomize/kyaml/filesys" +) + +var FileExtensions = []string{".json", ".yaml", ".yml"} +var InputExtensions = append(FileExtensions, "stdin") + +const defaultHttpGetAttempts = 3 +const pathNotExistError = "the path %q does not exist" + +// Builder provides convenience functions for taking arguments and parameters +// from the command line and converting them to a list of resources to iterate +// over using the Visitor interface. +type Builder struct { + categoryExpanderFn CategoryExpanderFunc + + // mapper is set explicitly by resource builders + mapper *mapper + + // clientConfigFn is a function to produce a client, *if* you need one + clientConfigFn ClientConfigFunc + + restMapperFn RESTMapperFunc + + // objectTyper is statically determinant per-command invocation based on your internal or unstructured choice + // it does not ever need to rely upon discovery. + objectTyper runtime.ObjectTyper + + // codecFactory describes which codecs you want to use + negotiatedSerializer runtime.NegotiatedSerializer + + // local indicates that we cannot make server calls + local bool + + errs []error + + paths []Visitor + stream bool + stdinInUse bool + dir bool + + visitorConcurrency int + + labelSelector *string + fieldSelector *string + selectAll bool + limitChunks int64 + requestTransforms []RequestTransform + + resources []string + subresource string + + namespace string + allNamespace bool + names []string + + resourceTuples []resourceTuple + + defaultNamespace bool + requireNamespace bool + + flatten bool + latest bool + + requireObject bool + + singleResourceType bool + continueOnError bool + + singleItemImplied bool + + schema ContentValidator + + // fakeClientFn is used for testing + fakeClientFn FakeClientFunc +} + +var missingResourceError = fmt.Errorf(`You must provide one or more resources by argument or filename. +Example resource specifications include: + '-f rsrc.yaml' + '--filename=rsrc.json' + ' ' + ''`) + +var LocalResourceError = errors.New(`error: you must specify resources by --filename when --local is set. +Example resource specifications include: + '-f rsrc.yaml' + '--filename=rsrc.json'`) + +var StdinMultiUseError = errors.New("standard input cannot be used for multiple arguments") + +// TODO: expand this to include other errors. 
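+// IsUsageError reports whether err was produced by the Builder because the caller supplied no usable
+// resource arguments (currently only missingResourceError).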
+func IsUsageError(err error) bool { + if err == nil { + return false + } + return err == missingResourceError +} + +type FilenameOptions struct { + Filenames []string + Kustomize string + Recursive bool +} + +func (o *FilenameOptions) validate() []error { + var errs []error + if len(o.Filenames) > 0 && len(o.Kustomize) > 0 { + errs = append(errs, fmt.Errorf("only one of -f or -k can be specified")) + } + if len(o.Kustomize) > 0 && o.Recursive { + errs = append(errs, fmt.Errorf("the -k flag can't be used with -f or -R")) + } + return errs +} + +func (o *FilenameOptions) RequireFilenameOrKustomize() error { + if len(o.Filenames) == 0 && len(o.Kustomize) == 0 { + return fmt.Errorf("must specify one of -f and -k") + } + return nil +} + +type resourceTuple struct { + Resource string + Name string +} + +type FakeClientFunc func(version schema.GroupVersion) (RESTClient, error) + +func NewFakeBuilder(fakeClientFn FakeClientFunc, restMapper RESTMapperFunc, categoryExpander CategoryExpanderFunc) *Builder { + ret := newBuilder(nil, restMapper, categoryExpander) + ret.fakeClientFn = fakeClientFn + return ret +} + +// NewBuilder creates a builder that operates on generic objects. At least one of +// internal or unstructured must be specified. +// TODO: Add versioned client (although versioned is still lossy) +// TODO remove internal and unstructured mapper and instead have them set the negotiated serializer for use in the client +func newBuilder(clientConfigFn ClientConfigFunc, restMapper RESTMapperFunc, categoryExpander CategoryExpanderFunc) *Builder { + return &Builder{ + clientConfigFn: clientConfigFn, + restMapperFn: restMapper, + categoryExpanderFn: categoryExpander, + requireObject: true, + } +} + +// noopClientGetter implements RESTClientGetter returning only errors. +// used as a dummy getter in a local-only builder. +type noopClientGetter struct{} + +func (noopClientGetter) ToRESTConfig() (*rest.Config, error) { + return nil, fmt.Errorf("local operation only") +} +func (noopClientGetter) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { + return nil, fmt.Errorf("local operation only") +} +func (noopClientGetter) ToRESTMapper() (meta.RESTMapper, error) { + return nil, fmt.Errorf("local operation only") +} + +// NewLocalBuilder returns a builder that is configured not to create REST clients and avoids asking the server for results. +func NewLocalBuilder() *Builder { + return NewBuilder(noopClientGetter{}).Local() +} + +func NewBuilder(restClientGetter RESTClientGetter) *Builder { + categoryExpanderFn := func() (restmapper.CategoryExpander, error) { + discoveryClient, err := restClientGetter.ToDiscoveryClient() + if err != nil { + return nil, err + } + return restmapper.NewDiscoveryCategoryExpander(discoveryClient), err + } + + return newBuilder( + restClientGetter.ToRESTConfig, + restClientGetter.ToRESTMapper, + (&cachingCategoryExpanderFunc{delegate: categoryExpanderFn}).ToCategoryExpander, + ) +} + +func (b *Builder) Schema(schema ContentValidator) *Builder { + b.schema = schema + return b +} + +func (b *Builder) AddError(err error) *Builder { + if err == nil { + return b + } + b.errs = append(b.errs, err) + return b +} + +// VisitorConcurrency sets the number of concurrent visitors to use when +// visiting lists. 
+func (b *Builder) VisitorConcurrency(concurrency int) *Builder { + b.visitorConcurrency = concurrency + return b +} + +// FilenameParam groups input in two categories: URLs and files (files, directories, STDIN) +// If enforceNamespace is false, namespaces in the specs will be allowed to +// override the default namespace. If it is true, namespaces that don't match +// will cause an error. +// If ContinueOnError() is set prior to this method, objects on the path that are not +// recognized will be ignored (but logged at V(2)). +func (b *Builder) FilenameParam(enforceNamespace bool, filenameOptions *FilenameOptions) *Builder { + if errs := filenameOptions.validate(); len(errs) > 0 { + b.errs = append(b.errs, errs...) + return b + } + recursive := filenameOptions.Recursive + paths := filenameOptions.Filenames + for _, s := range paths { + switch { + case s == "-": + b.Stdin() + case strings.Index(s, "http://") == 0 || strings.Index(s, "https://") == 0: + url, err := url.Parse(s) + if err != nil { + b.errs = append(b.errs, fmt.Errorf("the URL passed to filename %q is not valid: %v", s, err)) + continue + } + b.URL(defaultHttpGetAttempts, url) + default: + matches, err := expandIfFilePattern(s) + if err != nil { + b.errs = append(b.errs, err) + continue + } + if !recursive && len(matches) == 1 { + b.singleItemImplied = true + } + b.Path(recursive, matches...) + } + } + if filenameOptions.Kustomize != "" { + b.paths = append( + b.paths, + &KustomizeVisitor{ + mapper: b.mapper, + dirPath: filenameOptions.Kustomize, + schema: b.schema, + fSys: filesys.MakeFsOnDisk(), + }) + } + + if enforceNamespace { + b.RequireNamespace() + } + + return b +} + +// Unstructured updates the builder so that it will request and send unstructured +// objects. Unstructured objects preserve all fields sent by the server in a map format +// based on the object's JSON structure which means no data is lost when the client +// reads and then writes an object. Use this mode in preference to Internal unless you +// are working with Go types directly. +func (b *Builder) Unstructured() *Builder { + if b.mapper != nil { + b.errs = append(b.errs, fmt.Errorf("another mapper was already selected, cannot use unstructured types")) + return b + } + b.objectTyper = unstructuredscheme.NewUnstructuredObjectTyper() + b.mapper = &mapper{ + localFn: b.isLocal, + restMapperFn: b.restMapperFn, + clientFn: b.getClient, + decoder: &metadataValidatingDecoder{unstructured.UnstructuredJSONScheme}, + } + + return b +} + +// WithScheme uses the scheme to manage typing, conversion (optional), and decoding. If decodingVersions +// is empty, then you can end up with internal types. You have been warned. 
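+// Typical invocation (illustrative; "getter" is an assumed RESTClientGetter and "scheme" the kubectl scheme package):
+//
+//	b := NewBuilder(getter).WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...)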
+func (b *Builder) WithScheme(scheme *runtime.Scheme, decodingVersions ...schema.GroupVersion) *Builder { + if b.mapper != nil { + b.errs = append(b.errs, fmt.Errorf("another mapper was already selected, cannot use internal types")) + return b + } + b.objectTyper = scheme + codecFactory := serializer.NewCodecFactory(scheme) + negotiatedSerializer := runtime.NegotiatedSerializer(codecFactory) + // if you specified versions, you're specifying a desire for external types, which you don't want to round-trip through + // internal types + if len(decodingVersions) > 0 { + negotiatedSerializer = codecFactory.WithoutConversion() + } + b.negotiatedSerializer = negotiatedSerializer + + b.mapper = &mapper{ + localFn: b.isLocal, + restMapperFn: b.restMapperFn, + clientFn: b.getClient, + decoder: codecFactory.UniversalDecoder(decodingVersions...), + } + + return b +} + +// LocalParam calls Local() if local is true. +func (b *Builder) LocalParam(local bool) *Builder { + if local { + b.Local() + } + return b +} + +// Local will avoid asking the server for results. +func (b *Builder) Local() *Builder { + b.local = true + return b +} + +func (b *Builder) isLocal() bool { + return b.local +} + +// Mapper returns a copy of the current mapper. +func (b *Builder) Mapper() *mapper { + mapper := *b.mapper + return &mapper +} + +// URL accepts a number of URLs directly. +func (b *Builder) URL(httpAttemptCount int, urls ...*url.URL) *Builder { + for _, u := range urls { + b.paths = append(b.paths, &URLVisitor{ + URL: u, + StreamVisitor: NewStreamVisitor(nil, b.mapper, u.String(), b.schema), + HttpAttemptCount: httpAttemptCount, + }) + } + return b +} + +// Stdin will read objects from the standard input. If ContinueOnError() is set +// prior to this method being called, objects in the stream that are unrecognized +// will be ignored (but logged at V(2)). If StdinInUse() is set prior to this method +// being called, an error will be recorded as there are multiple entities trying to use +// the single standard input stream. +func (b *Builder) Stdin() *Builder { + b.stream = true + if b.stdinInUse { + b.errs = append(b.errs, StdinMultiUseError) + } + b.stdinInUse = true + b.paths = append(b.paths, FileVisitorForSTDIN(b.mapper, b.schema)) + return b +} + +// StdinInUse will mark standard input as in use by this Builder, and therefore standard +// input should not be used by another entity. If Stdin() is set prior to this method +// being called, an error will be recorded as there are multiple entities trying to use +// the single standard input stream. +func (b *Builder) StdinInUse() *Builder { + if b.stdinInUse { + b.errs = append(b.errs, StdinMultiUseError) + } + b.stdinInUse = true + return b +} + +// Stream will read objects from the provided reader, and if an error occurs will +// include the name string in the error message. If ContinueOnError() is set +// prior to this method being called, objects in the stream that are unrecognized +// will be ignored (but logged at V(2)). +func (b *Builder) Stream(r io.Reader, name string) *Builder { + b.stream = true + b.paths = append(b.paths, NewStreamVisitor(r, b.mapper, name, b.schema)) + return b +} + +// Path accepts a set of paths that may be files, directories (all can containing +// one or more resources). Creates a FileVisitor for each file and then each +// FileVisitor is streaming the content to a StreamVisitor. If ContinueOnError() is set +// prior to this method being called, objects on the path that are unrecognized will be +// ignored (but logged at V(2)). 
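+// For example (illustrative), Path(true, "./manifests") walks the directory recursively and creates one
+// FileVisitor per file matching FileExtensions (.json, .yaml, .yml).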
+func (b *Builder) Path(recursive bool, paths ...string) *Builder { + for _, p := range paths { + _, err := os.Stat(p) + if os.IsNotExist(err) { + b.errs = append(b.errs, fmt.Errorf(pathNotExistError, p)) + continue + } + if err != nil { + b.errs = append(b.errs, fmt.Errorf("the path %q cannot be accessed: %v", p, err)) + continue + } + + visitors, err := ExpandPathsToFileVisitors(b.mapper, p, recursive, FileExtensions, b.schema) + if err != nil { + b.errs = append(b.errs, fmt.Errorf("error reading %q: %v", p, err)) + } + if len(visitors) > 1 { + b.dir = true + } + + b.paths = append(b.paths, visitors...) + } + if len(b.paths) == 0 && len(b.errs) == 0 { + b.errs = append(b.errs, fmt.Errorf("error reading %v: recognized file extensions are %v", paths, FileExtensions)) + } + return b +} + +// ResourceTypes is a list of types of resources to operate on, when listing objects on +// the server or retrieving objects that match a selector. +func (b *Builder) ResourceTypes(types ...string) *Builder { + b.resources = append(b.resources, types...) + return b +} + +// ResourceNames accepts a default type and one or more names, and creates tuples of +// resources +func (b *Builder) ResourceNames(resource string, names ...string) *Builder { + for _, name := range names { + // See if this input string is of type/name format + tuple, ok, err := splitResourceTypeName(name) + if err != nil { + b.errs = append(b.errs, err) + return b + } + + if ok { + b.resourceTuples = append(b.resourceTuples, tuple) + continue + } + if len(resource) == 0 { + b.errs = append(b.errs, fmt.Errorf("the argument %q must be RESOURCE/NAME", name)) + continue + } + + // Use the given default type to create a resource tuple + b.resourceTuples = append(b.resourceTuples, resourceTuple{Resource: resource, Name: name}) + } + return b +} + +// LabelSelectorParam defines a selector that should be applied to the object types to load. +// This will not affect files loaded from disk or URL. If the parameter is empty it is +// a no-op - to select all resources invoke `b.LabelSelector(labels.Everything.String)`. +func (b *Builder) LabelSelectorParam(s string) *Builder { + selector := strings.TrimSpace(s) + if len(selector) == 0 { + return b + } + if b.selectAll { + b.errs = append(b.errs, fmt.Errorf("found non-empty label selector %q with previously set 'all' parameter. ", s)) + return b + } + return b.LabelSelector(selector) +} + +// LabelSelector accepts a selector directly and will filter the resulting list by that object. +// Use LabelSelectorParam instead for user input. +func (b *Builder) LabelSelector(selector string) *Builder { + if len(selector) == 0 { + return b + } + + b.labelSelector = &selector + return b +} + +// FieldSelectorParam defines a selector that should be applied to the object types to load. +// This will not affect files loaded from disk or URL. If the parameter is empty it is +// a no-op - to select all resources. +func (b *Builder) FieldSelectorParam(s string) *Builder { + s = strings.TrimSpace(s) + if len(s) == 0 { + return b + } + if b.selectAll { + b.errs = append(b.errs, fmt.Errorf("found non-empty field selector %q with previously set 'all' parameter. 
", s)) + return b + } + b.fieldSelector = &s + return b +} + +// NamespaceParam accepts the namespace that these resources should be +// considered under from - used by DefaultNamespace() and RequireNamespace() +func (b *Builder) NamespaceParam(namespace string) *Builder { + b.namespace = namespace + return b +} + +// DefaultNamespace instructs the builder to set the namespace value for any object found +// to NamespaceParam() if empty. +func (b *Builder) DefaultNamespace() *Builder { + b.defaultNamespace = true + return b +} + +// AllNamespaces instructs the builder to metav1.NamespaceAll as a namespace to request resources +// across all of the namespace. This overrides the namespace set by NamespaceParam(). +func (b *Builder) AllNamespaces(allNamespace bool) *Builder { + if allNamespace { + b.namespace = metav1.NamespaceAll + } + b.allNamespace = allNamespace + return b +} + +// RequireNamespace instructs the builder to set the namespace value for any object found +// to NamespaceParam() if empty, and if the value on the resource does not match +// NamespaceParam() an error will be returned. +func (b *Builder) RequireNamespace() *Builder { + b.requireNamespace = true + return b +} + +// RequestChunksOf attempts to load responses from the server in batches of size limit +// to avoid long delays loading and transferring very large lists. If unset defaults to +// no chunking. +func (b *Builder) RequestChunksOf(chunkSize int64) *Builder { + b.limitChunks = chunkSize + return b +} + +// TransformRequests alters API calls made by clients requested from this builder. Pass +// an empty list to clear modifiers. +func (b *Builder) TransformRequests(opts ...RequestTransform) *Builder { + b.requestTransforms = opts + return b +} + +// Subresource instructs the builder to retrieve the object at the +// subresource path instead of the main resource path. +func (b *Builder) Subresource(subresource string) *Builder { + b.subresource = subresource + return b +} + +// SelectEverythingParam +func (b *Builder) SelectAllParam(selectAll bool) *Builder { + if selectAll && (b.labelSelector != nil || b.fieldSelector != nil) { + b.errs = append(b.errs, fmt.Errorf("setting 'all' parameter but found a non empty selector. ")) + return b + } + b.selectAll = selectAll + return b +} + +// ResourceTypeOrNameArgs indicates that the builder should accept arguments +// of the form `([,,...]| [,,...])`. When one argument is +// received, the types provided will be retrieved from the server (and be comma delimited). +// When two or more arguments are received, they must be a single type and resource name(s). +// The allowEmptySelector permits to select all the resources (via Everything func). +func (b *Builder) ResourceTypeOrNameArgs(allowEmptySelector bool, args ...string) *Builder { + args = normalizeMultipleResourcesArgs(args) + if ok, err := hasCombinedTypeArgs(args); ok { + if err != nil { + b.errs = append(b.errs, err) + return b + } + for _, s := range args { + tuple, ok, err := splitResourceTypeName(s) + if err != nil { + b.errs = append(b.errs, err) + return b + } + if ok { + b.resourceTuples = append(b.resourceTuples, tuple) + } + } + return b + } + if len(args) > 0 { + // Try replacing aliases only in types + args[0] = b.ReplaceAliases(args[0]) + } + switch { + case len(args) > 2: + b.names = append(b.names, args[1:]...) + b.ResourceTypes(SplitResourceArgument(args[0])...) + case len(args) == 2: + b.names = append(b.names, args[1]) + b.ResourceTypes(SplitResourceArgument(args[0])...) 
+ case len(args) == 1: + b.ResourceTypes(SplitResourceArgument(args[0])...) + if b.labelSelector == nil && allowEmptySelector { + selector := labels.Everything().String() + b.labelSelector = &selector + } + case len(args) == 0: + default: + b.errs = append(b.errs, fmt.Errorf("arguments must consist of a resource or a resource and name")) + } + return b +} + +// ReplaceAliases accepts an argument and tries to expand any existing +// aliases found in it +func (b *Builder) ReplaceAliases(input string) string { + replaced := []string{} + for _, arg := range strings.Split(input, ",") { + if b.categoryExpanderFn == nil { + continue + } + categoryExpander, err := b.categoryExpanderFn() + if err != nil { + b.AddError(err) + continue + } + + if resources, ok := categoryExpander.Expand(arg); ok { + asStrings := []string{} + for _, resource := range resources { + if len(resource.Group) == 0 { + asStrings = append(asStrings, resource.Resource) + continue + } + asStrings = append(asStrings, resource.Resource+"."+resource.Group) + } + arg = strings.Join(asStrings, ",") + } + replaced = append(replaced, arg) + } + return strings.Join(replaced, ",") +} + +func hasCombinedTypeArgs(args []string) (bool, error) { + hasSlash := 0 + for _, s := range args { + if strings.Contains(s, "/") { + hasSlash++ + } + } + switch { + case hasSlash > 0 && hasSlash == len(args): + return true, nil + case hasSlash > 0 && hasSlash != len(args): + baseCmd := "cmd" + if len(os.Args) > 0 { + baseCmdSlice := strings.Split(os.Args[0], "/") + baseCmd = baseCmdSlice[len(baseCmdSlice)-1] + } + return true, fmt.Errorf("there is no need to specify a resource type as a separate argument when passing arguments in resource/name form (e.g. '%s get resource/' instead of '%s get resource resource/'", baseCmd, baseCmd) + default: + return false, nil + } +} + +// Normalize args convert multiple resources to resource tuples, a,b,c d +// as a transform to a/d b/d c/d +func normalizeMultipleResourcesArgs(args []string) []string { + if len(args) >= 2 { + resources := []string{} + resources = append(resources, SplitResourceArgument(args[0])...) + if len(resources) > 1 { + names := []string{} + names = append(names, args[1:]...) + newArgs := []string{} + for _, resource := range resources { + for _, name := range names { + newArgs = append(newArgs, strings.Join([]string{resource, name}, "/")) + } + } + return newArgs + } + } + return args +} + +// splitResourceTypeName handles type/name resource formats and returns a resource tuple +// (empty or not), whether it successfully found one, and an error +func splitResourceTypeName(s string) (resourceTuple, bool, error) { + if !strings.Contains(s, "/") { + return resourceTuple{}, false, nil + } + seg := strings.Split(s, "/") + if len(seg) != 2 { + return resourceTuple{}, false, fmt.Errorf("arguments in resource/name form may not have more than one slash") + } + resource, name := seg[0], seg[1] + if len(resource) == 0 || len(name) == 0 || len(SplitResourceArgument(resource)) != 1 { + return resourceTuple{}, false, fmt.Errorf("arguments in resource/name form must have a single resource and name") + } + return resourceTuple{Resource: resource, Name: name}, true, nil +} + +// Flatten will convert any objects with a field named "Items" that is an array of runtime.Object +// compatible types into individual entries and give them their own items. The original object +// is not passed to any visitors. 
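+//
+// Illustrative sketch (the file name is an assumption): a manifest whose top-level
+// object is a v1.List of three ConfigMaps yields three separate Infos when Flatten()
+// is set, instead of a single Info wrapping the list:
+//
+//	infos, err := b.Unstructured().Path(false, "list.yaml").Flatten().Do().Infos()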
+func (b *Builder) Flatten() *Builder { + b.flatten = true + return b +} + +// Latest will fetch the latest copy of any objects loaded from URLs or files from the server. +func (b *Builder) Latest() *Builder { + b.latest = true + return b +} + +// RequireObject ensures that resulting infos have an object set. If false, resulting info may not have an object set. +func (b *Builder) RequireObject(require bool) *Builder { + b.requireObject = require + return b +} + +// ContinueOnError will attempt to load and visit as many objects as possible, even if some visits +// return errors or some objects cannot be loaded. The default behavior is to terminate after +// the first error is returned from a VisitorFunc. +func (b *Builder) ContinueOnError() *Builder { + b.continueOnError = true + return b +} + +// SingleResourceType will cause the builder to error if the user specifies more than a single type +// of resource. +func (b *Builder) SingleResourceType() *Builder { + b.singleResourceType = true + return b +} + +// mappingFor returns the RESTMapping for the Kind given, or the Kind referenced by the resource. +// Prefers a fully specified GroupVersionResource match. If one is not found, we match on a fully +// specified GroupVersionKind, or fallback to a match on GroupKind. +func (b *Builder) mappingFor(resourceOrKindArg string) (*meta.RESTMapping, error) { + fullySpecifiedGVR, groupResource := schema.ParseResourceArg(resourceOrKindArg) + gvk := schema.GroupVersionKind{} + restMapper, err := b.restMapperFn() + if err != nil { + return nil, err + } + + if fullySpecifiedGVR != nil { + gvk, _ = restMapper.KindFor(*fullySpecifiedGVR) + } + if gvk.Empty() { + gvk, _ = restMapper.KindFor(groupResource.WithVersion("")) + } + if !gvk.Empty() { + return restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + } + + fullySpecifiedGVK, groupKind := schema.ParseKindArg(resourceOrKindArg) + if fullySpecifiedGVK == nil { + gvk := groupKind.WithVersion("") + fullySpecifiedGVK = &gvk + } + + if !fullySpecifiedGVK.Empty() { + if mapping, err := restMapper.RESTMapping(fullySpecifiedGVK.GroupKind(), fullySpecifiedGVK.Version); err == nil { + return mapping, nil + } + } + + mapping, err := restMapper.RESTMapping(groupKind, gvk.Version) + if err != nil { + // if we error out here, it is because we could not match a resource or a kind + // for the given argument. To maintain consistency with previous behavior, + // announce that a resource type could not be found. 
+ // if the error is _not_ a *meta.NoKindMatchError, then we had trouble doing discovery, + // so we should return the original error since it may help a user diagnose what is actually wrong + if meta.IsNoMatchError(err) { + return nil, fmt.Errorf("the server doesn't have a resource type %q", groupResource.Resource) + } + return nil, err + } + + return mapping, nil +} + +func (b *Builder) resourceMappings() ([]*meta.RESTMapping, error) { + if len(b.resources) > 1 && b.singleResourceType { + return nil, fmt.Errorf("you may only specify a single resource type") + } + mappings := []*meta.RESTMapping{} + seen := map[schema.GroupVersionKind]bool{} + for _, r := range b.resources { + mapping, err := b.mappingFor(r) + if err != nil { + return nil, err + } + // This ensures the mappings for resources(shortcuts, plural) unique + if seen[mapping.GroupVersionKind] { + continue + } + seen[mapping.GroupVersionKind] = true + + mappings = append(mappings, mapping) + } + return mappings, nil +} + +func (b *Builder) resourceTupleMappings() (map[string]*meta.RESTMapping, error) { + mappings := make(map[string]*meta.RESTMapping) + canonical := make(map[schema.GroupVersionResource]struct{}) + for _, r := range b.resourceTuples { + if _, ok := mappings[r.Resource]; ok { + continue + } + mapping, err := b.mappingFor(r.Resource) + if err != nil { + return nil, err + } + + mappings[r.Resource] = mapping + canonical[mapping.Resource] = struct{}{} + } + if len(canonical) > 1 && b.singleResourceType { + return nil, fmt.Errorf("you may only specify a single resource type") + } + return mappings, nil +} + +func (b *Builder) visitorResult() *Result { + if len(b.errs) > 0 { + return &Result{err: utilerrors.NewAggregate(b.errs)} + } + + if b.selectAll { + selector := labels.Everything().String() + b.labelSelector = &selector + } + + // visit items specified by paths + if len(b.paths) != 0 { + return b.visitByPaths() + } + + // visit selectors + if b.labelSelector != nil || b.fieldSelector != nil { + return b.visitBySelector() + } + + // visit items specified by resource and name + if len(b.resourceTuples) != 0 { + return b.visitByResource() + } + + // visit items specified by name + if len(b.names) != 0 { + return b.visitByName() + } + + if len(b.resources) != 0 { + for _, r := range b.resources { + _, err := b.mappingFor(r) + if err != nil { + return &Result{err: err} + } + } + return &Result{err: fmt.Errorf("resource(s) were provided, but no name was specified")} + } + return &Result{err: missingResourceError} +} + +func (b *Builder) visitBySelector() *Result { + result := &Result{ + targetsSingleItems: false, + } + + if len(b.names) != 0 { + return result.withError(fmt.Errorf("name cannot be provided when a selector is specified")) + } + if len(b.resourceTuples) != 0 { + return result.withError(fmt.Errorf("selectors and the all flag cannot be used when passing resource/name arguments")) + } + if len(b.resources) == 0 { + return result.withError(fmt.Errorf("at least one resource must be specified to use a selector")) + } + if len(b.subresource) != 0 { + return result.withError(fmt.Errorf("subresource cannot be used when bulk resources are specified")) + } + + mappings, err := b.resourceMappings() + if err != nil { + result.err = err + return result + } + + var labelSelector, fieldSelector string + if b.labelSelector != nil { + labelSelector = *b.labelSelector + } + if b.fieldSelector != nil { + fieldSelector = *b.fieldSelector + } + + visitors := []Visitor{} + for _, mapping := range mappings { + client, err := 
b.getClient(mapping.GroupVersionKind.GroupVersion()) + if err != nil { + result.err = err + return result + } + selectorNamespace := b.namespace + if mapping.Scope.Name() != meta.RESTScopeNameNamespace { + selectorNamespace = "" + } + visitors = append(visitors, NewSelector(client, mapping, selectorNamespace, labelSelector, fieldSelector, b.limitChunks)) + } + if b.continueOnError { + result.visitor = EagerVisitorList(visitors) + } else { + result.visitor = VisitorList(visitors) + } + result.sources = visitors + return result +} + +func (b *Builder) getClient(gv schema.GroupVersion) (RESTClient, error) { + var ( + client RESTClient + err error + ) + + switch { + case b.fakeClientFn != nil: + client, err = b.fakeClientFn(gv) + case b.negotiatedSerializer != nil: + client, err = b.clientConfigFn.withStdinUnavailable(b.stdinInUse).clientForGroupVersion(gv, b.negotiatedSerializer) + default: + client, err = b.clientConfigFn.withStdinUnavailable(b.stdinInUse).unstructuredClientForGroupVersion(gv) + } + + if err != nil { + return nil, err + } + + return NewClientWithOptions(client, b.requestTransforms...), nil +} + +func (b *Builder) visitByResource() *Result { + // if b.singleItemImplied is false, this could be by default, so double-check length + // of resourceTuples to determine if in fact it is singleItemImplied or not + isSingleItemImplied := b.singleItemImplied + if !isSingleItemImplied { + isSingleItemImplied = len(b.resourceTuples) == 1 + } + + result := &Result{ + singleItemImplied: isSingleItemImplied, + targetsSingleItems: true, + } + + if len(b.resources) != 0 { + return result.withError(fmt.Errorf("you may not specify individual resources and bulk resources in the same call")) + } + + // retrieve one client for each resource + mappings, err := b.resourceTupleMappings() + if err != nil { + result.err = err + return result + } + clients := make(map[string]RESTClient) + for _, mapping := range mappings { + s := fmt.Sprintf("%s/%s", mapping.GroupVersionKind.GroupVersion().String(), mapping.Resource.Resource) + if _, ok := clients[s]; ok { + continue + } + client, err := b.getClient(mapping.GroupVersionKind.GroupVersion()) + if err != nil { + result.err = err + return result + } + clients[s] = client + } + + items := []Visitor{} + for _, tuple := range b.resourceTuples { + mapping, ok := mappings[tuple.Resource] + if !ok { + return result.withError(fmt.Errorf("resource %q is not recognized: %v", tuple.Resource, mappings)) + } + s := fmt.Sprintf("%s/%s", mapping.GroupVersionKind.GroupVersion().String(), mapping.Resource.Resource) + client, ok := clients[s] + if !ok { + return result.withError(fmt.Errorf("could not find a client for resource %q", tuple.Resource)) + } + + selectorNamespace := b.namespace + if mapping.Scope.Name() != meta.RESTScopeNameNamespace { + selectorNamespace = "" + } else { + if len(b.namespace) == 0 { + errMsg := "namespace may not be empty when retrieving a resource by name" + if b.allNamespace { + errMsg = "a resource cannot be retrieved by name across all namespaces" + } + return result.withError(fmt.Errorf(errMsg)) + } + } + + info := &Info{ + Client: client, + Mapping: mapping, + Namespace: selectorNamespace, + Name: tuple.Name, + Subresource: b.subresource, + } + items = append(items, info) + } + + var visitors Visitor + if b.continueOnError { + visitors = EagerVisitorList(items) + } else { + visitors = VisitorList(items) + } + result.visitor = visitors + result.sources = items + return result +} + +func (b *Builder) visitByName() *Result { + result := 
&Result{ + singleItemImplied: len(b.names) == 1, + targetsSingleItems: true, + } + + if len(b.paths) != 0 { + return result.withError(fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify a resource by arguments as well")) + } + if len(b.resources) == 0 { + return result.withError(fmt.Errorf("you must provide a resource and a resource name together")) + } + if len(b.resources) > 1 { + return result.withError(fmt.Errorf("you must specify only one resource")) + } + + mappings, err := b.resourceMappings() + if err != nil { + result.err = err + return result + } + mapping := mappings[0] + + client, err := b.getClient(mapping.GroupVersionKind.GroupVersion()) + if err != nil { + result.err = err + return result + } + + selectorNamespace := b.namespace + if mapping.Scope.Name() != meta.RESTScopeNameNamespace { + selectorNamespace = "" + } else { + if len(b.namespace) == 0 { + errMsg := "namespace may not be empty when retrieving a resource by name" + if b.allNamespace { + errMsg = "a resource cannot be retrieved by name across all namespaces" + } + return result.withError(fmt.Errorf(errMsg)) + } + } + + visitors := []Visitor{} + for _, name := range b.names { + info := &Info{ + Client: client, + Mapping: mapping, + Namespace: selectorNamespace, + Name: name, + Subresource: b.subresource, + } + visitors = append(visitors, info) + } + result.visitor = VisitorList(visitors) + result.sources = visitors + return result +} + +func (b *Builder) visitByPaths() *Result { + result := &Result{ + singleItemImplied: !b.dir && !b.stream && len(b.paths) == 1, + targetsSingleItems: true, + } + + if len(b.resources) != 0 { + return result.withError(fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify resource arguments as well")) + } + if len(b.names) != 0 { + return result.withError(fmt.Errorf("name cannot be provided when a path is specified")) + } + if len(b.resourceTuples) != 0 { + return result.withError(fmt.Errorf("resource/name arguments cannot be provided when a path is specified")) + } + + var visitors Visitor + if b.continueOnError { + visitors = EagerVisitorList(b.paths) + } else { + visitors = ConcurrentVisitorList{ + visitors: b.paths, + concurrency: b.visitorConcurrency, + } + } + + if b.flatten { + visitors = NewFlattenListVisitor(visitors, b.objectTyper, b.mapper) + } + + // only items from disk can be refetched + if b.latest { + // must set namespace prior to fetching + if b.defaultNamespace { + visitors = NewDecoratedVisitor(visitors, SetNamespace(b.namespace)) + } + visitors = NewDecoratedVisitor(visitors, RetrieveLatest) + } + if b.labelSelector != nil { + selector, err := labels.Parse(*b.labelSelector) + if err != nil { + return result.withError(fmt.Errorf("the provided selector %q is not valid: %v", *b.labelSelector, err)) + } + visitors = NewFilteredVisitor(visitors, FilterByLabelSelector(selector)) + } + result.visitor = visitors + result.sources = b.paths + return result +} + +// Do returns a Result object with a Visitor for the resources identified by the Builder. +// The visitor will respect the error behavior specified by ContinueOnError. Note that stream +// inputs are consumed by the first execution - use Infos() or Object() on the Result to capture a list +// for further iteration. 
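+//
+// An end-to-end sketch of the fluent API (illustrative; `getter` and the
+// newBuilder constructor are assumptions, not defined in this file):
+//
+//	r := newBuilder(getter).
+//		Unstructured().
+//		NamespaceParam("default").DefaultNamespace().
+//		FilenameParam(false, &FilenameOptions{Filenames: []string{"deploy.yaml"}}).
+//		Flatten().
+//		ContinueOnError().
+//		Do()
+//	if err := r.Err(); err != nil {
+//		return err
+//	}
+//	infos, err := r.Infos()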
+func (b *Builder) Do() *Result { + r := b.visitorResult() + r.mapper = b.Mapper() + if r.err != nil { + return r + } + if b.flatten { + r.visitor = NewFlattenListVisitor(r.visitor, b.objectTyper, b.mapper) + } + helpers := []VisitorFunc{} + if b.defaultNamespace { + helpers = append(helpers, SetNamespace(b.namespace)) + } + if b.requireNamespace { + helpers = append(helpers, RequireNamespace(b.namespace)) + } + helpers = append(helpers, FilterNamespace) + if b.requireObject { + helpers = append(helpers, RetrieveLazy) + } + if b.continueOnError { + r.visitor = ContinueOnErrorVisitor{Visitor: r.visitor} + } + r.visitor = NewDecoratedVisitor(r.visitor, helpers...) + return r +} + +// SplitResourceArgument splits the argument with commas and returns unique +// strings in the original order. +func SplitResourceArgument(arg string) []string { + out := []string{} + set := sets.NewString() + for _, s := range strings.Split(arg, ",") { + if set.Has(s) { + continue + } + set.Insert(s) + out = append(out, s) + } + return out +} + +// HasNames returns true if the provided args contain resource names +func HasNames(args []string) (bool, error) { + args = normalizeMultipleResourcesArgs(args) + hasCombinedTypes, err := hasCombinedTypeArgs(args) + if err != nil { + return false, err + } + return hasCombinedTypes || len(args) > 1, nil +} + +// expandIfFilePattern returns all the filenames that match the input pattern +// or the filename if it is a specific filename and not a pattern. +// If the input is a pattern and it yields no result it will result in an error. +func expandIfFilePattern(pattern string) ([]string, error) { + if _, err := os.Stat(pattern); os.IsNotExist(err) { + matches, err := filepath.Glob(pattern) + if err == nil && len(matches) == 0 { + return nil, fmt.Errorf(pathNotExistError, pattern) + } + if err == filepath.ErrBadPattern { + return nil, fmt.Errorf("pattern %q is not valid: %v", pattern, err) + } + return matches, err + } + return []string{pattern}, nil +} + +type cachingCategoryExpanderFunc struct { + delegate CategoryExpanderFunc + + lock sync.Mutex + cached restmapper.CategoryExpander +} + +func (c *cachingCategoryExpanderFunc) ToCategoryExpander() (restmapper.CategoryExpander, error) { + c.lock.Lock() + defer c.lock.Unlock() + if c.cached != nil { + return c.cached, nil + } + + ret, err := c.delegate() + if err != nil { + return nil, err + } + c.cached = ret + return c.cached, nil +} diff --git a/pkg/kubectl/cli-runtime/resource/client.go b/pkg/kubectl/cli-runtime/resource/client.go new file mode 100644 index 0000000..cd52c30 --- /dev/null +++ b/pkg/kubectl/cli-runtime/resource/client.go @@ -0,0 +1,69 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" +) + +// TODO require negotiatedSerializer. 
leaving it optional lets us plumb current behavior and deal with the difference after major plumbing is complete +func (clientConfigFn ClientConfigFunc) clientForGroupVersion(gv schema.GroupVersion, negotiatedSerializer runtime.NegotiatedSerializer) (RESTClient, error) { + cfg, err := clientConfigFn() + if err != nil { + return nil, err + } + if negotiatedSerializer != nil { + cfg.ContentConfig.NegotiatedSerializer = negotiatedSerializer + } + cfg.GroupVersion = &gv + if len(gv.Group) == 0 { + cfg.APIPath = "/api" + } else { + cfg.APIPath = "/apis" + } + + return rest.RESTClientFor(cfg) +} + +func (clientConfigFn ClientConfigFunc) unstructuredClientForGroupVersion(gv schema.GroupVersion) (RESTClient, error) { + cfg, err := clientConfigFn() + if err != nil { + return nil, err + } + cfg.ContentConfig = UnstructuredPlusDefaultContentConfig() + cfg.GroupVersion = &gv + if len(gv.Group) == 0 { + cfg.APIPath = "/api" + } else { + cfg.APIPath = "/apis" + } + + return rest.RESTClientFor(cfg) +} + +func (clientConfigFn ClientConfigFunc) withStdinUnavailable(stdinUnavailable bool) ClientConfigFunc { + return func() (*rest.Config, error) { + cfg, err := clientConfigFn() + if stdinUnavailable && cfg != nil && cfg.ExecProvider != nil { + cfg.ExecProvider.StdinUnavailable = stdinUnavailable + cfg.ExecProvider.StdinUnavailableMessage = "used by stdin resource manifest reader" + } + return cfg, err + } +} diff --git a/pkg/kubectl/cli-runtime/resource/helper.go b/pkg/kubectl/cli-runtime/resource/helper.go new file mode 100644 index 0000000..aa400ae --- /dev/null +++ b/pkg/kubectl/cli-runtime/resource/helper.go @@ -0,0 +1,321 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "context" + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" +) + +var metadataAccessor = meta.NewAccessor() + +// Helper provides methods for retrieving or mutating a RESTful +// resource. +type Helper struct { + // The name of this resource as the server would recognize it + Resource string + // The name of the subresource as the server would recognize it + Subresource string + // A RESTClient capable of mutating this resource. + RESTClient RESTClient + // True if the resource type is scoped to namespaces + NamespaceScoped bool + // If true, then use server-side dry-run to not persist changes to storage + // for verbs and resources that support server-side dry-run. + // + // Note this should only be used against an apiserver with dry-run enabled, + // and on resources that support dry-run. If the apiserver or the resource + // does not support dry-run, then the change will be persisted to storage. + ServerDryRun bool + + // FieldManager is the name associated with the actor or entity that is making + // changes. 
+ FieldManager string + + // FieldValidation is the directive used to indicate how the server should perform + // field validation (Ignore, Warn, or Strict) + FieldValidation string +} + +// NewHelper creates a Helper from a ResourceMapping +func NewHelper(client RESTClient, mapping *meta.RESTMapping) *Helper { + return &Helper{ + Resource: mapping.Resource.Resource, + RESTClient: client, + NamespaceScoped: mapping.Scope.Name() == meta.RESTScopeNameNamespace, + } +} + +// DryRun, if true, will use server-side dry-run to not persist changes to storage. +// Otherwise, changes will be persisted to storage. +func (m *Helper) DryRun(dryRun bool) *Helper { + m.ServerDryRun = dryRun + return m +} + +// WithFieldManager sets the field manager option to indicate the actor or entity +// that is making changes in a create or update operation. +func (m *Helper) WithFieldManager(fieldManager string) *Helper { + m.FieldManager = fieldManager + return m +} + +// WithFieldValidation sets the field validation option to indicate +// how the server should perform field validation (Ignore, Warn, or Strict). +func (m *Helper) WithFieldValidation(validationDirective string) *Helper { + m.FieldValidation = validationDirective + return m +} + +// Subresource sets the helper to access (/[ns//]/) +func (m *Helper) WithSubresource(subresource string) *Helper { + m.Subresource = subresource + return m +} + +func (m *Helper) Get(namespace, name string) (runtime.Object, error) { + req := m.RESTClient.Get(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + Name(name). + SubResource(m.Subresource) + return req.Do(context.TODO()).Get() +} + +func (m *Helper) List(namespace, apiVersion string, options *metav1.ListOptions) (runtime.Object, error) { + req := m.RESTClient.Get(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + VersionedParams(options, metav1.ParameterCodec) + return req.Do(context.TODO()).Get() +} + +// FollowContinue handles the continue parameter returned by the API server when using list +// chunking. To take advantage of this, the initial ListOptions provided by the consumer +// should include a non-zero Limit parameter. +func FollowContinue(initialOpts *metav1.ListOptions, + listFunc func(metav1.ListOptions) (runtime.Object, error)) error { + opts := initialOpts + for { + list, err := listFunc(*opts) + if err != nil { + return err + } + nextContinueToken, _ := metadataAccessor.Continue(list) + if len(nextContinueToken) == 0 { + return nil + } + opts.Continue = nextContinueToken + } +} + +// EnhanceListError augments errors typically returned by List operations with additional context, +// making sure to retain the StatusError type when applicable. 
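+//
+// Illustrative sketch (the selector and subject are assumptions): a NotFound returned
+// while listing by selector is rewrapped with the query that failed:
+//
+//	err = EnhanceListError(err, opts, "pods")
+//	// -> Unable to find "pods" that match label selector "app=web", field selector "": ...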
+func EnhanceListError(err error, opts metav1.ListOptions, subj string) error { + if apierrors.IsResourceExpired(err) { + return err + } + if apierrors.IsBadRequest(err) || apierrors.IsNotFound(err) { + if se, ok := err.(*apierrors.StatusError); ok { + // modify the message without hiding this is an API error + if len(opts.LabelSelector) == 0 && len(opts.FieldSelector) == 0 { + se.ErrStatus.Message = fmt.Sprintf("Unable to list %q: %v", subj, + se.ErrStatus.Message) + } else { + se.ErrStatus.Message = fmt.Sprintf( + "Unable to find %q that match label selector %q, field selector %q: %v", subj, + opts.LabelSelector, + opts.FieldSelector, se.ErrStatus.Message) + } + return se + } + if len(opts.LabelSelector) == 0 && len(opts.FieldSelector) == 0 { + return fmt.Errorf("Unable to list %q: %v", subj, err) + } + return fmt.Errorf("Unable to find %q that match label selector %q, field selector %q: %v", + subj, opts.LabelSelector, opts.FieldSelector, err) + } + return err +} + +func (m *Helper) Watch(namespace, apiVersion string, options *metav1.ListOptions) (watch.Interface, error) { + options.Watch = true + return m.RESTClient.Get(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + VersionedParams(options, metav1.ParameterCodec). + Watch(context.TODO()) +} + +func (m *Helper) WatchSingle(namespace, name, resourceVersion string) (watch.Interface, error) { + return m.RESTClient.Get(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + VersionedParams(&metav1.ListOptions{ + ResourceVersion: resourceVersion, + Watch: true, + FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(), + }, metav1.ParameterCodec). + Watch(context.TODO()) +} + +func (m *Helper) Delete(namespace, name string) (runtime.Object, error) { + return m.DeleteWithOptions(namespace, name, nil) +} + +func (m *Helper) DeleteWithOptions(namespace, name string, options *metav1.DeleteOptions) (runtime.Object, error) { + if options == nil { + options = &metav1.DeleteOptions{} + } + if m.ServerDryRun { + options.DryRun = []string{metav1.DryRunAll} + } + + return m.RESTClient.Delete(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + Name(name). + Body(options). + Do(context.TODO()). + Get() +} + +func (m *Helper) Create(namespace string, modify bool, obj runtime.Object) (runtime.Object, error) { + return m.CreateWithOptions(namespace, modify, obj, nil) +} + +func (m *Helper) CreateWithOptions(namespace string, modify bool, obj runtime.Object, options *metav1.CreateOptions) (runtime.Object, error) { + if options == nil { + options = &metav1.CreateOptions{} + } + if m.ServerDryRun { + options.DryRun = []string{metav1.DryRunAll} + } + if m.FieldManager != "" { + options.FieldManager = m.FieldManager + } + if m.FieldValidation != "" { + options.FieldValidation = m.FieldValidation + } + if modify { + // Attempt to version the object based on client logic. 
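+        // A create request must not carry a resourceVersion (the API server rejects
+        // objects to be created that set one), so any client-supplied value is
+        // cleared below before sending.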
+ version, err := metadataAccessor.ResourceVersion(obj) + if err != nil { + // We don't know how to clear the version on this object, so send it to the server as is + return m.createResource(m.RESTClient, m.Resource, namespace, obj, options) + } + if version != "" { + if err := metadataAccessor.SetResourceVersion(obj, ""); err != nil { + return nil, err + } + } + } + + return m.createResource(m.RESTClient, m.Resource, namespace, obj, options) +} + +func (m *Helper) createResource(c RESTClient, resource, namespace string, obj runtime.Object, options *metav1.CreateOptions) (runtime.Object, error) { + return c.Post(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(resource). + VersionedParams(options, metav1.ParameterCodec). + Body(obj). + Do(context.TODO()). + Get() +} +func (m *Helper) Patch(namespace, name string, pt types.PatchType, data []byte, options *metav1.PatchOptions) (runtime.Object, error) { + if options == nil { + options = &metav1.PatchOptions{} + } + if m.ServerDryRun { + options.DryRun = []string{metav1.DryRunAll} + } + if m.FieldManager != "" { + options.FieldManager = m.FieldManager + } + if m.FieldValidation != "" { + options.FieldValidation = m.FieldValidation + } + return m.RESTClient.Patch(pt). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + Name(name). + SubResource(m.Subresource). + VersionedParams(options, metav1.ParameterCodec). + Body(data). + Do(context.TODO()). + Get() +} + +func (m *Helper) Replace(namespace, name string, overwrite bool, obj runtime.Object) (runtime.Object, error) { + c := m.RESTClient + var options = &metav1.UpdateOptions{} + if m.ServerDryRun { + options.DryRun = []string{metav1.DryRunAll} + } + if m.FieldManager != "" { + options.FieldManager = m.FieldManager + } + if m.FieldValidation != "" { + options.FieldValidation = m.FieldValidation + } + + // Attempt to version the object based on client logic. + version, err := metadataAccessor.ResourceVersion(obj) + if err != nil { + // We don't know how to version this object, so send it to the server as is + return m.replaceResource(c, m.Resource, namespace, name, obj, options) + } + if version == "" && overwrite { + // Retrieve the current version of the object to overwrite the server object + serverObj, err := c.Get().NamespaceIfScoped(namespace, m.NamespaceScoped).Resource(m.Resource).Name(name).SubResource(m.Subresource).Do(context.TODO()).Get() + if err != nil { + // The object does not exist, but we want it to be created + return m.replaceResource(c, m.Resource, namespace, name, obj, options) + } + serverVersion, err := metadataAccessor.ResourceVersion(serverObj) + if err != nil { + return nil, err + } + if err := metadataAccessor.SetResourceVersion(obj, serverVersion); err != nil { + return nil, err + } + } + + return m.replaceResource(c, m.Resource, namespace, name, obj, options) +} + +func (m *Helper) replaceResource(c RESTClient, resource, namespace, name string, obj runtime.Object, options *metav1.UpdateOptions) (runtime.Object, error) { + return c.Put(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(resource). + Name(name). + SubResource(m.Subresource). + VersionedParams(options, metav1.ParameterCodec). + Body(obj). + Do(context.TODO()). + Get() +} diff --git a/pkg/kubectl/cli-runtime/resource/interfaces.go b/pkg/kubectl/cli-runtime/resource/interfaces.go new file mode 100644 index 0000000..29d7b34 --- /dev/null +++ b/pkg/kubectl/cli-runtime/resource/interfaces.go @@ -0,0 +1,103 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +type RESTClientGetter interface { + ToRESTConfig() (*rest.Config, error) + ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) + ToRESTMapper() (meta.RESTMapper, error) +} + +type ClientConfigFunc func() (*rest.Config, error) +type RESTMapperFunc func() (meta.RESTMapper, error) +type CategoryExpanderFunc func() (restmapper.CategoryExpander, error) + +// RESTClient is a client helper for dealing with RESTful resources +// in a generic way. +type RESTClient interface { + Get() *rest.Request + Post() *rest.Request + Patch(types.PatchType) *rest.Request + Delete() *rest.Request + Put() *rest.Request +} + +// RequestTransform is a function that is given a chance to modify the outgoing request. +type RequestTransform func(*rest.Request) + +// NewClientWithOptions wraps the provided RESTClient and invokes each transform on each +// newly created request. +func NewClientWithOptions(c RESTClient, transforms ...RequestTransform) RESTClient { + if len(transforms) == 0 { + return c + } + return &clientOptions{c: c, transforms: transforms} +} + +type clientOptions struct { + c RESTClient + transforms []RequestTransform +} + +func (c *clientOptions) modify(req *rest.Request) *rest.Request { + for _, transform := range c.transforms { + transform(req) + } + return req +} + +func (c *clientOptions) Get() *rest.Request { + return c.modify(c.c.Get()) +} + +func (c *clientOptions) Post() *rest.Request { + return c.modify(c.c.Post()) +} +func (c *clientOptions) Patch(t types.PatchType) *rest.Request { + return c.modify(c.c.Patch(t)) +} +func (c *clientOptions) Delete() *rest.Request { + return c.modify(c.c.Delete()) +} +func (c *clientOptions) Put() *rest.Request { + return c.modify(c.c.Put()) +} + +// ContentValidator is an interface that knows how to validate an API object serialized to a byte array. +type ContentValidator interface { + ValidateBytes(data []byte) error +} + +// Visitor lets clients walk a list of resources. +type Visitor interface { + Visit(VisitorFunc) error +} + +// VisitorFunc implements the Visitor interface for a matching function. +// If there was a problem walking a list of resources, the incoming error +// will describe the problem and the function can decide how to handle that error. +// A nil returned indicates to accept an error to continue loops even when errors happen. +// This is useful for ignoring certain kinds of errors or aggregating errors in some way. +type VisitorFunc func(*Info, error) error diff --git a/pkg/kubectl/cli-runtime/resource/kustomizevisitor.go b/pkg/kubectl/cli-runtime/resource/kustomizevisitor.go new file mode 100644 index 0000000..32895ea --- /dev/null +++ b/pkg/kubectl/cli-runtime/resource/kustomizevisitor.go @@ -0,0 +1,54 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "bytes" + + "sigs.k8s.io/kustomize/api/krusty" + "sigs.k8s.io/kustomize/kyaml/filesys" +) + +// KustomizeVisitor handles kustomization.yaml files. +type KustomizeVisitor struct { + mapper *mapper + schema ContentValidator + // Directory expected to contain a kustomization file. + dirPath string + // File system containing dirPath. + fSys filesys.FileSystem + // Holds result of kustomize build, retained for tests. + yml []byte +} + +// Visit passes the result of a kustomize build to a StreamVisitor. +func (v *KustomizeVisitor) Visit(fn VisitorFunc) error { + kOpts := krusty.MakeDefaultOptions() + kOpts.Reorder = krusty.ReorderOptionLegacy + k := krusty.MakeKustomizer(kOpts) + m, err := k.Run(v.fSys, v.dirPath) + if err != nil { + return err + } + v.yml, err = m.AsYaml() + if err != nil { + return err + } + sv := NewStreamVisitor( + bytes.NewReader(v.yml), v.mapper, v.dirPath, v.schema) + return sv.Visit(fn) +} diff --git a/pkg/kubectl/cli-runtime/resource/mapper.go b/pkg/kubectl/cli-runtime/resource/mapper.go new file mode 100644 index 0000000..03b6668 --- /dev/null +++ b/pkg/kubectl/cli-runtime/resource/mapper.go @@ -0,0 +1,166 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Mapper is a convenience struct for holding references to the interfaces +// needed to create Info for arbitrary objects. +type mapper struct { + // localFn indicates the call can't make server requests + localFn func() bool + + restMapperFn RESTMapperFunc + clientFn func(version schema.GroupVersion) (RESTClient, error) + decoder runtime.Decoder +} + +// InfoForData creates an Info object for the given data. An error is returned +// if any of the decoding or client lookup steps fail. Name and namespace will be +// set into Info if the mapping's MetadataAccessor can retrieve them. 
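+//
+// Illustrative sketch (manifestBytes and the source name are assumptions): raw
+// JSON/YAML bytes are decoded and, unless the builder is local, wired with a
+// RESTMapping and RESTClient:
+//
+//	info, err := m.infoForData(manifestBytes, "deploy.yaml")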
+func (m *mapper) infoForData(data []byte, source string) (*Info, error) { + obj, gvk, err := m.decoder.Decode(data, nil, nil) + if err != nil { + return nil, fmt.Errorf("unable to decode %q: %v", source, err) + } + + name, _ := metadataAccessor.Name(obj) + namespace, _ := metadataAccessor.Namespace(obj) + resourceVersion, _ := metadataAccessor.ResourceVersion(obj) + + ret := &Info{ + Source: source, + Namespace: namespace, + Name: name, + ResourceVersion: resourceVersion, + + Object: obj, + } + + if m.localFn == nil || !m.localFn() { + restMapper, err := m.restMapperFn() + if err != nil { + return nil, err + } + mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + if _, ok := err.(*meta.NoKindMatchError); ok { + return nil, fmt.Errorf("resource mapping not found for name: %q namespace: %q from %q: %w\nensure CRDs are installed first", + name, namespace, source, err) + } + return nil, fmt.Errorf("unable to recognize %q: %v", source, err) + } + ret.Mapping = mapping + + client, err := m.clientFn(gvk.GroupVersion()) + if err != nil { + return nil, fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err) + } + ret.Client = client + } + + return ret, nil +} + +// InfoForObject creates an Info object for the given Object. An error is returned +// if the object cannot be introspected. Name and namespace will be set into Info +// if the mapping's MetadataAccessor can retrieve them. +func (m *mapper) infoForObject(obj runtime.Object, typer runtime.ObjectTyper, preferredGVKs []schema.GroupVersionKind) (*Info, error) { + groupVersionKinds, _, err := typer.ObjectKinds(obj) + if err != nil { + return nil, fmt.Errorf("unable to get type info from the object %q: %v", reflect.TypeOf(obj), err) + } + + gvk := groupVersionKinds[0] + if len(groupVersionKinds) > 1 && len(preferredGVKs) > 0 { + gvk = preferredObjectKind(groupVersionKinds, preferredGVKs) + } + + name, _ := metadataAccessor.Name(obj) + namespace, _ := metadataAccessor.Namespace(obj) + resourceVersion, _ := metadataAccessor.ResourceVersion(obj) + ret := &Info{ + Namespace: namespace, + Name: name, + ResourceVersion: resourceVersion, + + Object: obj, + } + + if m.localFn == nil || !m.localFn() { + restMapper, err := m.restMapperFn() + if err != nil { + return nil, err + } + mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, fmt.Errorf("unable to recognize %v", err) + } + ret.Mapping = mapping + + client, err := m.clientFn(gvk.GroupVersion()) + if err != nil { + return nil, fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err) + } + ret.Client = client + } + + return ret, nil +} + +// preferredObjectKind picks the possibility that most closely matches the priority list in this order: +// GroupVersionKind matches (exact match) +// GroupKind matches +// Group matches +func preferredObjectKind(possibilities []schema.GroupVersionKind, preferences []schema.GroupVersionKind) schema.GroupVersionKind { + // Exact match + for _, priority := range preferences { + for _, possibility := range possibilities { + if possibility == priority { + return possibility + } + } + } + + // GroupKind match + for _, priority := range preferences { + for _, possibility := range possibilities { + if possibility.GroupKind() == priority.GroupKind() { + return possibility + } + } + } + + // Group match + for _, priority := range preferences { + for _, possibility := range possibilities { + if possibility.Group == priority.Group { + 
return possibility + } + } + } + + // Just pick the first + return possibilities[0] +} diff --git a/pkg/kubectl/cli-runtime/resource/metadata_decoder.go b/pkg/kubectl/cli-runtime/resource/metadata_decoder.go new file mode 100644 index 0000000..d688c3a --- /dev/null +++ b/pkg/kubectl/cli-runtime/resource/metadata_decoder.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utiljson "k8s.io/apimachinery/pkg/util/json" +) + +// metadataValidatingDecoder wraps a decoder and additionally ensures metadata schema fields decode before returning an unstructured object +type metadataValidatingDecoder struct { + decoder runtime.Decoder +} + +func (m *metadataValidatingDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + obj, gvk, err := m.decoder.Decode(data, defaults, into) + + // if we already errored, return + if err != nil { + return obj, gvk, err + } + + // if we're not unstructured, return + if _, isUnstructured := obj.(runtime.Unstructured); !isUnstructured { + return obj, gvk, err + } + + // make sure the data can decode into ObjectMeta before we return, + // so we don't silently truncate schema errors in metadata later with accesser get/set calls + v := &metadataOnlyObject{} + if typedErr := utiljson.Unmarshal(data, v); typedErr != nil { + return obj, gvk, typedErr + } + return obj, gvk, err +} + +type metadataOnlyObject struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` +} diff --git a/pkg/kubectl/cli-runtime/resource/result.go b/pkg/kubectl/cli-runtime/resource/result.go new file mode 100644 index 0000000..2ccf2a6 --- /dev/null +++ b/pkg/kubectl/cli-runtime/resource/result.go @@ -0,0 +1,242 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "fmt" + "reflect" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/watch" +) + +// ErrMatchFunc can be used to filter errors that may not be true failures. 
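+//
+// For example (illustrative; apierrors is k8s.io/apimachinery/pkg/api/errors,
+// which is not imported by this file), NotFound errors can be tolerated:
+//
+//	r.IgnoreErrors(apierrors.IsNotFound)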
+type ErrMatchFunc func(error) bool + +// Result contains helper methods for dealing with the outcome of a Builder. +type Result struct { + err error + visitor Visitor + + sources []Visitor + singleItemImplied bool + targetsSingleItems bool + + mapper *mapper + ignoreErrors []utilerrors.Matcher + + // populated by a call to Infos + info []*Info +} + +// withError allows a fluent style for internal result code. +func (r *Result) withError(err error) *Result { + r.err = err + return r +} + +// TargetsSingleItems returns true if any of the builder arguments pointed +// to non-list calls (if the user explicitly asked for any object by name). +// This includes directories, streams, URLs, and resource name tuples. +func (r *Result) TargetsSingleItems() bool { + return r.targetsSingleItems +} + +// IgnoreErrors will filter errors that occur when by visiting the result +// (but not errors that occur by creating the result in the first place), +// eliminating any that match fns. This is best used in combination with +// Builder.ContinueOnError(), where the visitors accumulate errors and return +// them after visiting as a slice of errors. If no errors remain after +// filtering, the various visitor methods on Result will return nil for +// err. +func (r *Result) IgnoreErrors(fns ...ErrMatchFunc) *Result { + for _, fn := range fns { + r.ignoreErrors = append(r.ignoreErrors, utilerrors.Matcher(fn)) + } + return r +} + +// Mapper returns a copy of the builder's mapper. +func (r *Result) Mapper() *mapper { + return r.mapper +} + +// Err returns one or more errors (via a util.ErrorList) that occurred prior +// to visiting the elements in the visitor. To see all errors including those +// that occur during visitation, invoke Infos(). +func (r *Result) Err() error { + return r.err +} + +// Visit implements the Visitor interface on the items described in the Builder. +// Note that some visitor sources are not traversable more than once, or may +// return different results. If you wish to operate on the same set of resources +// multiple times, use the Infos() method. +func (r *Result) Visit(fn VisitorFunc) error { + if r.err != nil { + return r.err + } + err := r.visitor.Visit(fn) + return utilerrors.FilterOut(err, r.ignoreErrors...) +} + +// IntoSingleItemImplied sets the provided boolean pointer to true if the Builder input +// implies a single item, or multiple. +func (r *Result) IntoSingleItemImplied(b *bool) *Result { + *b = r.singleItemImplied + return r +} + +// Infos returns an array of all of the resource infos retrieved via traversal. +// Will attempt to traverse the entire set of visitors only once, and will return +// a cached list on subsequent calls. +func (r *Result) Infos() ([]*Info, error) { + if r.err != nil { + return nil, r.err + } + if r.info != nil { + return r.info, nil + } + + infos := []*Info{} + err := r.visitor.Visit(func(info *Info, err error) error { + if err != nil { + return err + } + infos = append(infos, info) + return nil + }) + err = utilerrors.FilterOut(err, r.ignoreErrors...) + + r.info, r.err = infos, err + return infos, err +} + +// Object returns a single object representing the output of a single visit to all +// found resources. If the Builder was a singular context (expected to return a +// single resource by user input) and only a single resource was found, the resource +// will be returned as is. Otherwise, the returned resources will be part of an +// v1.List. The ResourceVersion of the v1.List will be set only if it is identical +// across all infos returned. 
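+//
+// Illustrative:
+//
+//	obj, err := r.Object() // a single object, or a *v1.List when several were matched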
+func (r *Result) Object() (runtime.Object, error) { + infos, err := r.Infos() + if err != nil { + return nil, err + } + + versions := sets.String{} + objects := []runtime.Object{} + for _, info := range infos { + if info.Object != nil { + objects = append(objects, info.Object) + versions.Insert(info.ResourceVersion) + } + } + + if len(objects) == 1 { + if r.singleItemImplied { + return objects[0], nil + } + // if the item is a list already, don't create another list + if meta.IsListType(objects[0]) { + return objects[0], nil + } + } + + version := "" + if len(versions) == 1 { + version = versions.List()[0] + } + + return toV1List(objects, version), err +} + +// Compile time check to enforce that list implements the necessary interface +var _ metav1.ListInterface = &v1.List{} +var _ metav1.ListMetaAccessor = &v1.List{} + +// toV1List takes a slice of Objects + their version, and returns +// a v1.List Object containing the objects in the Items field +func toV1List(objects []runtime.Object, version string) runtime.Object { + raw := []runtime.RawExtension{} + for _, o := range objects { + raw = append(raw, runtime.RawExtension{Object: o}) + } + return &v1.List{ + ListMeta: metav1.ListMeta{ + ResourceVersion: version, + }, + Items: raw, + } +} + +// ResourceMapping returns a single meta.RESTMapping representing the +// resources located by the builder, or an error if more than one +// mapping was found. +func (r *Result) ResourceMapping() (*meta.RESTMapping, error) { + if r.err != nil { + return nil, r.err + } + mappings := map[schema.GroupVersionResource]*meta.RESTMapping{} + for i := range r.sources { + m, ok := r.sources[i].(ResourceMapping) + if !ok { + return nil, fmt.Errorf("a resource mapping could not be loaded from %v", reflect.TypeOf(r.sources[i])) + } + mapping := m.ResourceMapping() + mappings[mapping.Resource] = mapping + } + if len(mappings) != 1 { + return nil, fmt.Errorf("expected only a single resource type") + } + for _, mapping := range mappings { + return mapping, nil + } + return nil, nil +} + +// Watch retrieves changes that occur on the server to the specified resource. +// It currently supports watching a single source - if the resource source +// (selectors or pure types) can be watched, they will be, otherwise the list +// will be visited (equivalent to the Infos() call) and if there is a single +// resource present, it will be watched, otherwise an error will be returned. +func (r *Result) Watch(resourceVersion string) (watch.Interface, error) { + if r.err != nil { + return nil, r.err + } + if len(r.sources) != 1 { + return nil, fmt.Errorf("you may only watch a single resource or type of resource at a time") + } + w, ok := r.sources[0].(Watchable) + if !ok { + info, err := r.Infos() + if err != nil { + return nil, err + } + if len(info) != 1 { + return nil, fmt.Errorf("watch is only supported on individual resources and resource collections - %d resources were found", len(info)) + } + return info[0].Watch(resourceVersion) + } + return w.Watch(resourceVersion) +} diff --git a/pkg/kubectl/cli-runtime/resource/scheme.go b/pkg/kubectl/cli-runtime/resource/scheme.go new file mode 100644 index 0000000..858a462 --- /dev/null +++ b/pkg/kubectl/cli-runtime/resource/scheme.go @@ -0,0 +1,82 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "encoding/json" + "io" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" +) + +// dynamicCodec is a codec that wraps the standard unstructured codec +// with special handling for Status objects. +// Deprecated only used by test code and its wrong +type dynamicCodec struct{} + +func (dynamicCodec) Decode(data []byte, gvk *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + obj, gvk, err := unstructured.UnstructuredJSONScheme.Decode(data, gvk, obj) + if err != nil { + return nil, nil, err + } + + if strings.EqualFold(gvk.Kind, "status") && gvk.Version == "v1" && (gvk.Group == "" || gvk.Group == "meta.k8s.io") { + if _, ok := obj.(*metav1.Status); !ok { + obj = &metav1.Status{} + err := json.Unmarshal(data, obj) + if err != nil { + return nil, nil, err + } + } + } + + return obj, gvk, nil +} + +func (dynamicCodec) Encode(obj runtime.Object, w io.Writer) error { + // There is no need to handle runtime.CacheableObject, as we only + // fallback to other encoders here. + return unstructured.UnstructuredJSONScheme.Encode(obj, w) +} + +// Identifier implements runtime.Encoder interface. +func (dynamicCodec) Identifier() runtime.Identifier { + return unstructured.UnstructuredJSONScheme.Identifier() +} + +// UnstructuredPlusDefaultContentConfig returns a rest.ContentConfig for dynamic types. It includes enough codecs to act as a "normal" +// serializer for the rest.client with options, status and the like. +func UnstructuredPlusDefaultContentConfig() rest.ContentConfig { + // TODO: scheme.Codecs here should become "pkg/apis/server/scheme" which is the minimal core you need + // to talk to a kubernetes server + jsonInfo, _ := runtime.SerializerInfoForMediaType(scheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON) + + jsonInfo.Serializer = dynamicCodec{} + jsonInfo.PrettySerializer = nil + return rest.ContentConfig{ + AcceptContentTypes: runtime.ContentTypeJSON, + ContentType: runtime.ContentTypeJSON, + NegotiatedSerializer: serializer.NegotiatedSerializerWrapper(jsonInfo), + } +} diff --git a/pkg/kubectl/cli-runtime/resource/selector.go b/pkg/kubectl/cli-runtime/resource/selector.go new file mode 100644 index 0000000..2a283d4 --- /dev/null +++ b/pkg/kubectl/cli-runtime/resource/selector.go @@ -0,0 +1,92 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" +) + +// Selector is a Visitor for resources that match a label selector. +type Selector struct { + Client RESTClient + Mapping *meta.RESTMapping + Namespace string + LabelSelector string + FieldSelector string + LimitChunks int64 +} + +// NewSelector creates a resource selector which hides details of getting items by their label selector. +func NewSelector(client RESTClient, mapping *meta.RESTMapping, namespace, labelSelector, fieldSelector string, limitChunks int64) *Selector { + return &Selector{ + Client: client, + Mapping: mapping, + Namespace: namespace, + LabelSelector: labelSelector, + FieldSelector: fieldSelector, + LimitChunks: limitChunks, + } +} + +// Visit implements Visitor and uses request chunking by default. +func (r *Selector) Visit(fn VisitorFunc) error { + helper := NewHelper(r.Client, r.Mapping) + initialOpts := metav1.ListOptions{ + LabelSelector: r.LabelSelector, + FieldSelector: r.FieldSelector, + Limit: r.LimitChunks, + } + return FollowContinue(&initialOpts, func(options metav1.ListOptions) (runtime.Object, error) { + list, err := helper.List( + r.Namespace, + r.ResourceMapping().GroupVersionKind.GroupVersion().String(), + &options, + ) + if err != nil { + return nil, EnhanceListError(err, options, r.Mapping.Resource.String()) + } + resourceVersion, _ := metadataAccessor.ResourceVersion(list) + + info := &Info{ + Client: r.Client, + Mapping: r.Mapping, + + Namespace: r.Namespace, + ResourceVersion: resourceVersion, + + Object: list, + } + + if err := fn(info, nil); err != nil { + return nil, err + } + return list, nil + }) +} + +func (r *Selector) Watch(resourceVersion string) (watch.Interface, error) { + return NewHelper(r.Client, r.Mapping).Watch(r.Namespace, r.ResourceMapping().GroupVersionKind.GroupVersion().String(), + &metav1.ListOptions{ResourceVersion: resourceVersion, LabelSelector: r.LabelSelector, FieldSelector: r.FieldSelector}) +} + +// ResourceMapping returns the mapping for this resource and implements ResourceMapping +func (r *Selector) ResourceMapping() *meta.RESTMapping { + return r.Mapping +} diff --git a/pkg/kubectl/cli-runtime/resource/visitor.go b/pkg/kubectl/cli-runtime/resource/visitor.go new file mode 100644 index 0000000..76cfbbd --- /dev/null +++ b/pkg/kubectl/cli-runtime/resource/visitor.go @@ -0,0 +1,770 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resource + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sync/errgroup" + "golang.org/x/text/encoding/unicode" + "golang.org/x/text/transform" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/apimachinery/pkg/watch" +) + +const ( + constSTDINstr = "STDIN" + stopValidateMessage = "if you choose to ignore these errors, turn validation off with --validate=false" +) + +// Watchable describes a resource that can be watched for changes that occur on the server, +// beginning after the provided resource version. +type Watchable interface { + Watch(resourceVersion string) (watch.Interface, error) +} + +// ResourceMapping allows an object to return the resource mapping associated with +// the resource or resources it represents. +type ResourceMapping interface { + ResourceMapping() *meta.RESTMapping +} + +// Info contains temporary info to execute a REST call, or show the results +// of an already completed REST call. +type Info struct { + // Client will only be present if this builder was not local + Client RESTClient + // Mapping will only be present if this builder was not local + Mapping *meta.RESTMapping + + // Namespace will be set if the object is namespaced and has a specified value. + Namespace string + Name string + + // Optional, Source is the filename or URL to template file (.json or .yaml), + // or stdin to use to handle the resource + Source string + // Optional, this is the most recent value returned by the server if available. It will + // typically be in unstructured or internal forms, depending on how the Builder was + // defined. If retrieved from the server, the Builder expects the mapping client to + // decide the final form. Use the AsVersioned, AsUnstructured, and AsInternal helpers + // to alter the object versions. + // If Subresource is specified, this will be the object for the subresource. + Object runtime.Object + // Optional, this is the most recent resource version the server knows about for + // this type of resource. It may not match the resource version of the object, + // but if set it should be equal to or newer than the resource version of the + // object (however the server defines resource version). + ResourceVersion string + // Optional, if specified, the object is the most recent value of the subresource + // returned by the server if available. + Subresource string +} + +// Visit implements Visitor +func (i *Info) Visit(fn VisitorFunc) error { + return fn(i, nil) +} + +// Get retrieves the object from the Namespace and Name fields +func (i *Info) Get() (err error) { + obj, err := NewHelper(i.Client, i.Mapping).WithSubresource(i.Subresource).Get(i.Namespace, i.Name) + if err != nil { + if errors.IsNotFound(err) && len(i.Namespace) > 0 && i.Namespace != metav1.NamespaceDefault && i.Namespace != metav1.NamespaceAll { + err2 := i.Client.Get().AbsPath("api", "v1", "namespaces", i.Namespace).Do(context.TODO()).Error() + if err2 != nil && errors.IsNotFound(err2) { + return err2 + } + } + return err + } + i.Object = obj + i.ResourceVersion, _ = metadataAccessor.ResourceVersion(obj) + return nil +} + +// Refresh updates the object with another object. 
If ignoreError is set +// the Object will be updated even if name, namespace, or resourceVersion +// attributes cannot be loaded from the object. +func (i *Info) Refresh(obj runtime.Object, ignoreError bool) error { + name, err := metadataAccessor.Name(obj) + if err != nil { + if !ignoreError { + return err + } + } else { + i.Name = name + } + namespace, err := metadataAccessor.Namespace(obj) + if err != nil { + if !ignoreError { + return err + } + } else { + i.Namespace = namespace + } + version, err := metadataAccessor.ResourceVersion(obj) + if err != nil { + if !ignoreError { + return err + } + } else { + i.ResourceVersion = version + } + i.Object = obj + return nil +} + +// ObjectName returns an approximate form of the resource's kind/name. +func (i *Info) ObjectName() string { + if i.Mapping != nil { + return fmt.Sprintf("%s/%s", i.Mapping.Resource.Resource, i.Name) + } + gvk := i.Object.GetObjectKind().GroupVersionKind() + if len(gvk.Group) == 0 { + return fmt.Sprintf("%s/%s", strings.ToLower(gvk.Kind), i.Name) + } + return fmt.Sprintf("%s.%s/%s\n", strings.ToLower(gvk.Kind), gvk.Group, i.Name) +} + +// String returns the general purpose string representation +func (i *Info) String() string { + basicInfo := fmt.Sprintf("Name: %q, Namespace: %q", i.Name, i.Namespace) + if i.Mapping != nil { + mappingInfo := fmt.Sprintf("Resource: %q, GroupVersionKind: %q", i.Mapping.Resource.String(), + i.Mapping.GroupVersionKind.String()) + return fmt.Sprint(mappingInfo, "\n", basicInfo) + } + return basicInfo +} + +// Namespaced returns true if the object belongs to a namespace +func (i *Info) Namespaced() bool { + if i.Mapping != nil { + // if we have RESTMapper info, use it + return i.Mapping.Scope.Name() == meta.RESTScopeNameNamespace + } + // otherwise, use the presence of a namespace in the info as an indicator + return len(i.Namespace) > 0 +} + +// Watch returns server changes to this object after it was retrieved. +func (i *Info) Watch(resourceVersion string) (watch.Interface, error) { + return NewHelper(i.Client, i.Mapping).WatchSingle(i.Namespace, i.Name, resourceVersion) +} + +// ResourceMapping returns the mapping for this resource and implements ResourceMapping +func (i *Info) ResourceMapping() *meta.RESTMapping { + return i.Mapping +} + +// VisitorList implements Visit for the sub visitors it contains. The first error +// returned from a child Visitor will terminate iteration. +type VisitorList []Visitor + +// Visit implements Visitor +func (l VisitorList) Visit(fn VisitorFunc) error { + for i := range l { + if err := l[i].Visit(fn); err != nil { + return err + } + } + return nil +} + +type ConcurrentVisitorList struct { + visitors []Visitor + concurrency int +} + +func (l ConcurrentVisitorList) Visit(fn VisitorFunc) error { + g := errgroup.Group{} + + // Concurrency 1 just runs the visitors sequentially, this is the default + // as it preserves the previous behavior, but allows components to opt into + // concurrency. + concurrency := 1 + if l.concurrency > concurrency { + concurrency = l.concurrency + } + g.SetLimit(concurrency) + + for i := range l.visitors { + i := i + g.Go(func() error { + return l.visitors[i].Visit(fn) + }) + } + + return g.Wait() +} + +// EagerVisitorList implements Visit for the sub visitors it contains. All errors +// will be captured and returned at the end of iteration. +type EagerVisitorList []Visitor + +// Visit implements Visitor, and gathers errors that occur during processing until +// all sub visitors have been visited. 
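+//
+// Illustrative sketch (editor's note): unlike VisitorList, which stops at the
+// first error, an EagerVisitorList visits everything and aggregates, e.g.
+//
+//	visitors := EagerVisitorList{vA, vB}
+//	err := visitors.Visit(func(info *Info, err error) error {
+//		fmt.Println(info.ObjectName())
+//		return nil
+//	})
+//
+// where vA and vB stand for any Visitor values; err is nil or a
+// utilerrors.Aggregate of every failure encountered along the way.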
+func (l EagerVisitorList) Visit(fn VisitorFunc) error { + var errs []error + for i := range l { + err := l[i].Visit(func(info *Info, err error) error { + if err != nil { + errs = append(errs, err) + return nil + } + if err := fn(info, nil); err != nil { + errs = append(errs, err) + } + return nil + }) + if err != nil { + errs = append(errs, err) + } + } + return utilerrors.NewAggregate(errs) +} + +func ValidateSchema(data []byte, schema ContentValidator) error { + if schema == nil { + return nil + } + if err := schema.ValidateBytes(data); err != nil { + return fmt.Errorf("error validating data: %v; %s", err, stopValidateMessage) + } + return nil +} + +// URLVisitor downloads the contents of a URL, and if successful, returns +// an info object representing the downloaded object. +type URLVisitor struct { + URL *url.URL + *StreamVisitor + HttpAttemptCount int +} + +func (v *URLVisitor) Visit(fn VisitorFunc) error { + body, err := readHttpWithRetries(httpgetImpl, time.Second, v.URL.String(), v.HttpAttemptCount) + if err != nil { + return err + } + defer body.Close() + v.StreamVisitor.Reader = body + return v.StreamVisitor.Visit(fn) +} + +// readHttpWithRetries tries to http.Get the v.URL retries times before giving up. +func readHttpWithRetries(get httpget, duration time.Duration, u string, attempts int) (io.ReadCloser, error) { + var err error + if attempts <= 0 { + return nil, fmt.Errorf("http attempts must be greater than 0, was %d", attempts) + } + for i := 0; i < attempts; i++ { + var ( + statusCode int + status string + body io.ReadCloser + ) + if i > 0 { + time.Sleep(duration) + } + + // Try to get the URL + statusCode, status, body, err = get(u) + + // Retry Errors + if err != nil { + continue + } + + if statusCode == http.StatusOK { + return body, nil + } + body.Close() + // Error - Set the error condition from the StatusCode + err = fmt.Errorf("unable to read URL %q, server reported %s, status code=%d", u, status, statusCode) + + if statusCode >= 500 && statusCode < 600 { + // Retry 500's + continue + } else { + // Don't retry other StatusCodes + break + } + } + return nil, err +} + +// httpget Defines function to retrieve a url and return the results. Exists for unit test stubbing. +type httpget func(url string) (int, string, io.ReadCloser, error) + +// httpgetImpl Implements a function to retrieve a url and return the results. +func httpgetImpl(url string) (int, string, io.ReadCloser, error) { + resp, err := http.Get(url) + if err != nil { + return 0, "", nil, err + } + return resp.StatusCode, resp.Status, resp.Body, nil +} + +// DecoratedVisitor will invoke the decorators in order prior to invoking the visitor function +// passed to Visit. An error will terminate the visit. +type DecoratedVisitor struct { + visitor Visitor + decorators []VisitorFunc +} + +// NewDecoratedVisitor will create a visitor that invokes the provided visitor functions before +// the user supplied visitor function is invoked, giving them the opportunity to mutate the Info +// object or terminate early with an error. 
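+//
+// Illustrative sketch (editor's note): decorators are plain VisitorFuncs, so
+// helpers from this file such as SetNamespace and RetrieveLazy can be chained
+// ahead of the caller's function:
+//
+//	v := NewDecoratedVisitor(base, SetNamespace("default"), RetrieveLazy)
+//	err := v.Visit(func(info *Info, err error) error {
+//		// info now has a namespace and, if it was not loaded yet, an Object
+//		return nil
+//	})
+//
+// where base stands for any Visitor.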
+func NewDecoratedVisitor(v Visitor, fn ...VisitorFunc) Visitor { + if len(fn) == 0 { + return v + } + return DecoratedVisitor{v, fn} +} + +// Visit implements Visitor +func (v DecoratedVisitor) Visit(fn VisitorFunc) error { + return v.visitor.Visit(func(info *Info, err error) error { + if err != nil { + return err + } + for i := range v.decorators { + if err := v.decorators[i](info, nil); err != nil { + return err + } + } + return fn(info, nil) + }) +} + +// ContinueOnErrorVisitor visits each item and, if an error occurs on +// any individual item, returns an aggregate error after all items +// are visited. +type ContinueOnErrorVisitor struct { + Visitor +} + +// Visit returns nil if no error occurs during traversal, a regular +// error if one occurs, or if multiple errors occur, an aggregate +// error. If the provided visitor fails on any individual item it +// will not prevent the remaining items from being visited. An error +// returned by the visitor directly may still result in some items +// not being visited. +func (v ContinueOnErrorVisitor) Visit(fn VisitorFunc) error { + var errs []error + err := v.Visitor.Visit(func(info *Info, err error) error { + if err != nil { + errs = append(errs, err) + return nil + } + if err := fn(info, nil); err != nil { + errs = append(errs, err) + } + return nil + }) + if err != nil { + errs = append(errs, err) + } + if len(errs) == 1 { + return errs[0] + } + return utilerrors.NewAggregate(errs) +} + +// FlattenListVisitor flattens any objects that runtime.ExtractList recognizes as a list +// - has an "Items" public field that is a slice of runtime.Objects or objects satisfying +// that interface - into multiple Infos. Returns nil in the case of no errors. +// When an error is hit on sub items (for instance, if a List contains an object that does +// not have a registered client or resource), returns an aggregate error. +type FlattenListVisitor struct { + visitor Visitor + typer runtime.ObjectTyper + mapper *mapper +} + +// NewFlattenListVisitor creates a visitor that will expand list style runtime.Objects +// into individual items and then visit them individually. +func NewFlattenListVisitor(v Visitor, typer runtime.ObjectTyper, mapper *mapper) Visitor { + return FlattenListVisitor{v, typer, mapper} +} + +func (v FlattenListVisitor) Visit(fn VisitorFunc) error { + return v.visitor.Visit(func(info *Info, err error) error { + if err != nil { + return err + } + if info.Object == nil { + return fn(info, nil) + } + if !meta.IsListType(info.Object) { + return fn(info, nil) + } + + items := []runtime.Object{} + itemsToProcess := []runtime.Object{info.Object} + + for i := 0; i < len(itemsToProcess); i++ { + currObj := itemsToProcess[i] + if !meta.IsListType(currObj) { + items = append(items, currObj) + continue + } + + currItems, err := meta.ExtractList(currObj) + if err != nil { + return err + } + if errs := runtime.DecodeList(currItems, v.mapper.decoder); len(errs) > 0 { + return utilerrors.NewAggregate(errs) + } + itemsToProcess = append(itemsToProcess, currItems...) 
+ } + + // If we have a GroupVersionKind on the list, prioritize that when asking for info on the objects contained in the list + var preferredGVKs []schema.GroupVersionKind + if info.Mapping != nil && !info.Mapping.GroupVersionKind.Empty() { + preferredGVKs = append(preferredGVKs, info.Mapping.GroupVersionKind) + } + var errs []error + for i := range items { + item, err := v.mapper.infoForObject(items[i], v.typer, preferredGVKs) + if err != nil { + errs = append(errs, err) + continue + } + if len(info.ResourceVersion) != 0 { + item.ResourceVersion = info.ResourceVersion + } + // propagate list source to items source + if len(info.Source) != 0 { + item.Source = info.Source + } + if err := fn(item, nil); err != nil { + errs = append(errs, err) + } + } + return utilerrors.NewAggregate(errs) + }) +} + +func ignoreFile(path string, extensions []string) bool { + if len(extensions) == 0 { + return false + } + ext := filepath.Ext(path) + for _, s := range extensions { + if s == ext { + return false + } + } + return true +} + +// FileVisitorForSTDIN return a special FileVisitor just for STDIN +func FileVisitorForSTDIN(mapper *mapper, schema ContentValidator) Visitor { + return &FileVisitor{ + Path: constSTDINstr, + StreamVisitor: NewStreamVisitor(nil, mapper, constSTDINstr, schema), + } +} + +// ExpandPathsToFileVisitors will return a slice of FileVisitors that will handle files from the provided path. +// After FileVisitors open the files, they will pass an io.Reader to a StreamVisitor to do the reading. (stdin +// is also taken care of). Paths argument also accepts a single file, and will return a single visitor +func ExpandPathsToFileVisitors(mapper *mapper, paths string, recursive bool, extensions []string, schema ContentValidator) ([]Visitor, error) { + var visitors []Visitor + err := filepath.Walk(paths, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if fi.IsDir() { + if path != paths && !recursive { + return filepath.SkipDir + } + return nil + } + // Don't check extension if the filepath was passed explicitly + if path != paths && ignoreFile(path, extensions) { + return nil + } + + visitor := &FileVisitor{ + Path: path, + StreamVisitor: NewStreamVisitor(nil, mapper, path, schema), + } + + visitors = append(visitors, visitor) + return nil + }) + + if err != nil { + return nil, err + } + return visitors, nil +} + +// FileVisitor is wrapping around a StreamVisitor, to handle open/close files +type FileVisitor struct { + Path string + *StreamVisitor +} + +// Visit in a FileVisitor is just taking care of opening/closing files +func (v *FileVisitor) Visit(fn VisitorFunc) error { + var f *os.File + if v.Path == constSTDINstr { + f = os.Stdin + } else { + var err error + f, err = os.Open(v.Path) + if err != nil { + return err + } + defer f.Close() + } + + // TODO: Consider adding a flag to force to UTF16, apparently some + // Windows tools don't write the BOM + utf16bom := unicode.BOMOverride(unicode.UTF8.NewDecoder()) + v.StreamVisitor.Reader = transform.NewReader(f, utf16bom) + + return v.StreamVisitor.Visit(fn) +} + +// StreamVisitor reads objects from an io.Reader and walks them. A stream visitor can only be +// visited once. +// TODO: depends on objects being in JSON format before being passed to decode - need to implement +// a stream decoder method on runtime.Codec to properly handle this. 
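+//
+// Illustrative sketch (editor's note): within this package a stream visitor
+// could be driven directly; m and manifests are placeholders (m names
+// whatever *mapper the Builder has already constructed) and the nil schema
+// skips validation:
+//
+//	sv := NewStreamVisitor(strings.NewReader(manifests), m, "inline", nil)
+//	err := sv.Visit(func(info *Info, err error) error {
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Println(info.ObjectName())
+//		return nil
+//	})
+//
+// Multiple YAML documents or JSON objects in the reader yield one Info each.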
+type StreamVisitor struct { + io.Reader + *mapper + + Source string + Schema ContentValidator +} + +// NewStreamVisitor is a helper function that is useful when we want to change the fields of the struct but keep calls the same. +func NewStreamVisitor(r io.Reader, mapper *mapper, source string, schema ContentValidator) *StreamVisitor { + return &StreamVisitor{ + Reader: r, + mapper: mapper, + Source: source, + Schema: schema, + } +} + +// Visit implements Visitor over a stream. StreamVisitor is able to distinct multiple resources in one stream. +func (v *StreamVisitor) Visit(fn VisitorFunc) error { + d := yaml.NewYAMLOrJSONDecoder(v.Reader, 4096) + for { + ext := runtime.RawExtension{} + if err := d.Decode(&ext); err != nil { + if err == io.EOF { + return nil + } + return fmt.Errorf("error parsing %s: %v", v.Source, err) + } + // TODO: This needs to be able to handle object in other encodings and schemas. + ext.Raw = bytes.TrimSpace(ext.Raw) + if len(ext.Raw) == 0 || bytes.Equal(ext.Raw, []byte("null")) { + continue + } + if err := ValidateSchema(ext.Raw, v.Schema); err != nil { + return fmt.Errorf("error validating %q: %v", v.Source, err) + } + info, err := v.infoForData(ext.Raw, v.Source) + if err != nil { + if fnErr := fn(info, err); fnErr != nil { + return fnErr + } + continue + } + if err := fn(info, nil); err != nil { + return err + } + } +} + +func UpdateObjectNamespace(info *Info, err error) error { + if err != nil { + return err + } + if info.Object != nil { + return metadataAccessor.SetNamespace(info.Object, info.Namespace) + } + return nil +} + +// FilterNamespace omits the namespace if the object is not namespace scoped +func FilterNamespace(info *Info, err error) error { + if err != nil { + return err + } + if !info.Namespaced() { + info.Namespace = "" + UpdateObjectNamespace(info, nil) + } + return nil +} + +// SetNamespace ensures that every Info object visited will have a namespace +// set. If info.Object is set, it will be mutated as well. +func SetNamespace(namespace string) VisitorFunc { + return func(info *Info, err error) error { + if err != nil { + return err + } + if !info.Namespaced() { + return nil + } + if len(info.Namespace) == 0 { + info.Namespace = namespace + UpdateObjectNamespace(info, nil) + } + return nil + } +} + +// RequireNamespace will either set a namespace if none is provided on the +// Info object, or if the namespace is set and does not match the provided +// value, returns an error. This is intended to guard against administrators +// accidentally operating on resources outside their namespace. +func RequireNamespace(namespace string) VisitorFunc { + return func(info *Info, err error) error { + if err != nil { + return err + } + if !info.Namespaced() { + return nil + } + if len(info.Namespace) == 0 { + info.Namespace = namespace + UpdateObjectNamespace(info, nil) + return nil + } + if info.Namespace != namespace { + return fmt.Errorf("the namespace from the provided object %q does not match the namespace %q. You must pass '--namespace=%s' to perform this operation.", info.Namespace, namespace, info.Namespace) + } + return nil + } +} + +// RetrieveLatest updates the Object on each Info by invoking a standard client +// Get. 
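+//
+// Illustrative sketch (editor's note): RetrieveLatest has the VisitorFunc
+// shape, so it composes like the other decorators, e.g.
+//
+//	v := NewDecoratedVisitor(base, RetrieveLatest)
+//
+// where base stands for any Visitor whose Infos already carry Name and, for
+// namespaced kinds, Namespace; list objects are rejected with an error.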
+func RetrieveLatest(info *Info, err error) error { + if err != nil { + return err + } + if meta.IsListType(info.Object) { + return fmt.Errorf("watch is only supported on individual resources and resource collections, but a list of resources is found") + } + if len(info.Name) == 0 { + return nil + } + if info.Namespaced() && len(info.Namespace) == 0 { + return fmt.Errorf("no namespace set on resource %s %q", info.Mapping.Resource, info.Name) + } + return info.Get() +} + +// RetrieveLazy updates the object if it has not been loaded yet. +func RetrieveLazy(info *Info, err error) error { + if err != nil { + return err + } + if info.Object == nil { + return info.Get() + } + return nil +} + +type FilterFunc func(info *Info, err error) (bool, error) + +type FilteredVisitor struct { + visitor Visitor + filters []FilterFunc +} + +func NewFilteredVisitor(v Visitor, fn ...FilterFunc) Visitor { + if len(fn) == 0 { + return v + } + return FilteredVisitor{v, fn} +} + +func (v FilteredVisitor) Visit(fn VisitorFunc) error { + return v.visitor.Visit(func(info *Info, err error) error { + if err != nil { + return err + } + for _, filter := range v.filters { + ok, err := filter(info, nil) + if err != nil { + return err + } + if !ok { + return nil + } + } + return fn(info, nil) + }) +} + +func FilterByLabelSelector(s labels.Selector) FilterFunc { + return func(info *Info, err error) (bool, error) { + if err != nil { + return false, err + } + a, err := meta.Accessor(info.Object) + if err != nil { + return false, err + } + if !s.Matches(labels.Set(a.GetLabels())) { + return false, nil + } + return true, nil + } +} + +type InfoListVisitor []*Info + +func (infos InfoListVisitor) Visit(fn VisitorFunc) error { + var err error + for _, i := range infos { + err = fn(i, err) + } + return err +}
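+
+// Illustrative sketch (editor's note, not upstream code): the filter helpers
+// above compose the same way as the decorators, e.g.
+//
+//	sel, _ := labels.Parse("app=demo")
+//	filtered := NewFilteredVisitor(base, FilterByLabelSelector(sel))
+//	err := filtered.Visit(func(info *Info, err error) error {
+//		fmt.Println(info.ObjectName())
+//		return nil
+//	})
+//
+// where base stands for any Visitor; only Infos whose labels match app=demo
+// reach the callback, and err carries any accessor or visit failure.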