Remove deprecated code to prepare branch for release 1.9
Ankitasw committed Oct 25, 2023
1 parent c2b813c commit e3df956
Showing 11 changed files with 33 additions and 102 deletions.
11 changes: 0 additions & 11 deletions controllers/vspherecluster_reconciler.go
@@ -51,12 +51,6 @@ import (
infrautilv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util"
)

-// legacyIdentityFinalizer is deprecated and should be used only while upgrading the cluster
-// from v1alpha3(v.0.7).
-//
-// Deprecated: legacyIdentityFinalizer will be removed in a future release.
-const legacyIdentityFinalizer string = "identity/infrastructure.cluster.x-k8s.io"
-
type clusterReconciler struct {
ControllerManagerContext *capvcontext.ControllerManagerContext
Client client.Client
@@ -210,11 +204,6 @@ func (r *clusterReconciler) reconcileDelete(ctx context.Context, clusterCtx *cap
log.Info(fmt.Sprintf("Removing finalizer from Secret %s/%s having finalizers %v", secret.Namespace, secret.Name, secret.Finalizers))
ctrlutil.RemoveFinalizer(secret, infrav1.SecretIdentitySetFinalizer)

-// Check if the old finalizer(from v0.7) is present, if yes, delete it
-// For more context, please refer: https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/issues/1482
-if ctrlutil.ContainsFinalizer(secret, legacyIdentityFinalizer) {
-	ctrlutil.RemoveFinalizer(secret, legacyIdentityFinalizer)
-}
if err := r.Client.Update(ctx, secret); err != nil {
return reconcile.Result{}, err
}
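
Note: with this block gone, identity Secrets created under v1alpha3 (v0.7) that still carry the legacy finalizer are no longer cleaned up by the controller and can get stuck in Terminating on delete. A minimal sketch of equivalent out-of-band cleanup, assuming a controller-runtime client (the helper name is illustrative, not part of this commit):

package cleanup

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const legacyIdentityFinalizer = "identity/infrastructure.cluster.x-k8s.io"

// stripLegacyFinalizer removes the v0.7-era finalizer from an identity
// Secret so deletion is not blocked after upgrading past this release.
func stripLegacyFinalizer(ctx context.Context, c client.Client, secret *corev1.Secret) error {
	if !ctrlutil.ContainsFinalizer(secret, legacyIdentityFinalizer) {
		return nil
	}
	ctrlutil.RemoveFinalizer(secret, legacyIdentityFinalizer)
	return c.Update(ctx, secret)
}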
14 changes: 0 additions & 14 deletions controllers/vspherecluster_reconciler_test.go
@@ -334,25 +334,11 @@ var _ = Describe("VIM based VSphere ClusterReconciler", func() {
By("deleting the vspherecluster which has the secret with legacy finalizer")
Expect(testEnv.Delete(ctx, instance)).To(Succeed())

By("checking that the secret is deleted")
secretKey := client.ObjectKey{Namespace: secret.Namespace, Name: secret.Name}
Eventually(func() bool {
err := testEnv.Get(ctx, secretKey, secret)
return apierrors.IsNotFound(err)
}, timeout).Should(BeTrue())

// confirm that the VSphereCluster is deleted
Eventually(func() bool {
err := testEnv.Get(ctx, key, instance)
return apierrors.IsNotFound(err)
}, timeout).Should(BeTrue())

/*By("checking that the secret is deleted")
secretKey := client.ObjectKey{Namespace: secret.Namespace, Name: secret.Name}
Eventually(func() bool {
err := testEnv.Get(ctx, secretKey, secret)
return apierrors.IsNotFound(err)
}, timeout).Should(BeTrue())*/
})
})

5 changes: 0 additions & 5 deletions controllers/vsphereclusteridentity_controller.go
@@ -192,11 +192,6 @@ func (r clusterIdentityReconciler) reconcileDelete(ctx context.Context, identity
return err
}
log.Info(fmt.Sprintf("Removing finalizer from Secret %s/%s", secret.Namespace, secret.Name))
-// Check if the old finalizer(from v0.7) is present, if yes, delete it
-// For more context, please refer: https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/issues/1482
-if ctrlutil.ContainsFinalizer(secret, legacyIdentityFinalizer) {
-	ctrlutil.RemoveFinalizer(secret, legacyIdentityFinalizer)
-}
ctrlutil.RemoveFinalizer(secret, infrav1.SecretIdentitySetFinalizer)
if err := r.Client.Update(ctx, secret); err != nil {
return err
56 changes: 24 additions & 32 deletions controllers/vspheredeploymentzone_controller_test.go
@@ -82,16 +82,14 @@ var _ = Describe("VSphereDeploymentZoneReconciler", func() {
},
Spec: infrav1.VSphereFailureDomainSpec{
Region: infrav1.FailureDomain{
-	Name:          "k8s-region-west",
-	Type:          infrav1.DatacenterFailureDomain,
-	TagCategory:   "k8s-region",
-	AutoConfigure: pointer.Bool(false),
+	Name:        "k8s-region-west",
+	Type:        infrav1.DatacenterFailureDomain,
+	TagCategory: "k8s-region",
},
Zone: infrav1.FailureDomain{
-	Name:          "k8s-zone-west-1",
-	Type:          infrav1.ComputeClusterFailureDomain,
-	TagCategory:   "k8s-zone",
-	AutoConfigure: pointer.Bool(false),
+	Name:        "k8s-zone-west-1",
+	Type:        infrav1.ComputeClusterFailureDomain,
+	TagCategory: "k8s-zone",
},
Topology: infrav1.Topology{
Datacenter: "DC0",
@@ -163,16 +161,14 @@ var _ = Describe("VSphereDeploymentZoneReconciler", func() {
},
Spec: infrav1.VSphereFailureDomainSpec{
Region: infrav1.FailureDomain{
-	Name:          "k8s-region-west",
-	Type:          infrav1.DatacenterFailureDomain,
-	TagCategory:   "k8s-region",
-	AutoConfigure: pointer.Bool(false),
+	Name:        "k8s-region-west",
+	Type:        infrav1.DatacenterFailureDomain,
+	TagCategory: "k8s-region",
},
Zone: infrav1.FailureDomain{
-	Name:          "k8s-zone-west-1",
-	Type:          infrav1.ComputeClusterFailureDomain,
-	TagCategory:   "k8s-zone",
-	AutoConfigure: pointer.Bool(false),
+	Name:        "k8s-zone-west-1",
+	Type:        infrav1.ComputeClusterFailureDomain,
+	TagCategory: "k8s-zone",
},
Topology: infrav1.Topology{
Datacenter: "DC0",
@@ -336,16 +332,14 @@ func TestVSphereDeploymentZone_Reconcile(t *testing.T) {
},
Spec: infrav1.VSphereFailureDomainSpec{
Region: infrav1.FailureDomain{
-	Name:          "k8s-region-west",
-	Type:          infrav1.DatacenterFailureDomain,
-	TagCategory:   "k8s-region",
-	AutoConfigure: pointer.Bool(false),
+	Name:        "k8s-region-west",
+	Type:        infrav1.DatacenterFailureDomain,
+	TagCategory: "k8s-region",
},
Zone: infrav1.FailureDomain{
-	Name:          "k8s-zone-west-1",
-	Type:          infrav1.ComputeClusterFailureDomain,
-	TagCategory:   "k8s-zone",
-	AutoConfigure: pointer.Bool(false),
+	Name:        "k8s-zone-west-1",
+	Type:        infrav1.ComputeClusterFailureDomain,
+	TagCategory: "k8s-zone",
},
Topology: infrav1.Topology{
Datacenter: "DC0",
@@ -408,16 +402,14 @@ func TestVSphereDeploymentZone_Reconcile(t *testing.T) {
},
Spec: infrav1.VSphereFailureDomainSpec{
Region: infrav1.FailureDomain{
-	Name:          "k8s-region-west",
-	Type:          infrav1.DatacenterFailureDomain,
-	TagCategory:   "k8s-region",
-	AutoConfigure: pointer.Bool(false),
+	Name:        "k8s-region-west",
+	Type:        infrav1.DatacenterFailureDomain,
+	TagCategory: "k8s-region",
},
Zone: infrav1.FailureDomain{
-	Name:          "k8s-zone-west-1",
-	Type:          infrav1.ComputeClusterFailureDomain,
-	TagCategory:   "k8s-zone",
-	AutoConfigure: pointer.Bool(false),
+	Name:        "k8s-zone-west-1",
+	Type:        infrav1.ComputeClusterFailureDomain,
+	TagCategory: "k8s-zone",
},
Topology: infrav1.Topology{
Datacenter: "DC0",
10 changes: 1 addition & 9 deletions internal/webhooks/vspherefailuredomain.go
@@ -24,7 +24,6 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
@@ -103,17 +102,10 @@ func (webhook *VSphereFailureDomainWebhook) ValidateDelete(_ context.Context, _

// Default implements webhook.Defaulter so a webhook will be registered for the type.
func (webhook *VSphereFailureDomainWebhook) Default(_ context.Context, obj runtime.Object) error {
-	typedObj, ok := obj.(*infrav1.VSphereFailureDomain)
+	_, ok := obj.(*infrav1.VSphereFailureDomain)
if !ok {
return apierrors.NewBadRequest(fmt.Sprintf("expected a VSphereFailureDomain but got a %T", obj))
}
-	if typedObj.Spec.Zone.AutoConfigure == nil {
-		typedObj.Spec.Zone.AutoConfigure = pointer.Bool(false)
-	}
-
-	if typedObj.Spec.Region.AutoConfigure == nil {
-		typedObj.Spec.Region.AutoConfigure = pointer.Bool(false)
-	}

return nil
}
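
Note: because the Default webhook no longer sets AutoConfigure, a nil pointer now survives admission unchanged. A hedged sketch of the nil-safe read consumers would need, assuming the deprecated AutoConfigure field is still present on infrav1.FailureDomain (the helper is illustrative, not from this commit):

// autoConfigure treats an unset (nil) AutoConfigure as false, matching the
// default the webhook used to apply before this change.
func autoConfigure(fd infrav1.FailureDomain) bool {
	return fd.AutoConfigure != nil && *fd.AutoConfigure
}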
10 changes: 3 additions & 7 deletions internal/webhooks/vspherefailuredomain_test.go
@@ -33,9 +33,6 @@ func TestVsphereFailureDomain_Default(t *testing.T) {
}
webhook := &VSphereFailureDomainWebhook{}
g.Expect(webhook.Default(context.Background(), m)).ToNot(HaveOccurred())
-
-g.Expect(*m.Spec.Zone.AutoConfigure).To(BeFalse())
-g.Expect(*m.Spec.Region.AutoConfigure).To(BeFalse())
}

func TestVSphereFailureDomain_ValidateCreate(t *testing.T) {
@@ -50,10 +47,9 @@ func TestVSphereFailureDomain_ValidateCreate(t *testing.T) {
name: "region failureDomain type is hostGroup",
failureDomain: infrav1.VSphereFailureDomain{Spec: infrav1.VSphereFailureDomainSpec{
Region: infrav1.FailureDomain{
-	Name:          "foo",
-	Type:          infrav1.HostGroupFailureDomain,
-	TagCategory:   "k8s-bar",
-	AutoConfigure: pointer.Bool(true),
+	Name:        "foo",
+	Type:        infrav1.HostGroupFailureDomain,
+	TagCategory: "k8s-bar",
},
}},
},
5 changes: 0 additions & 5 deletions pkg/services/network/constants.go
@@ -30,9 +30,4 @@ const (

// SystemNamespace is the namespace where supervisor control plane VMs reside.
SystemNamespace = "kube-system"
-
-// legacyDefaultNetworkLabel was the label used for default networks.
-//
-// Deprecated: legacyDefaultNetworkLabel will be removed in a future release.
-legacyDefaultNetworkLabel = "capw.vmware.com/is-default-network"
)
10 changes: 3 additions & 7 deletions pkg/services/network/netop_provider.go
@@ -24,7 +24,6 @@ import (
vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/cluster-api/util/conditions"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"

vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1"
@@ -55,15 +54,12 @@ func (np *netopNetworkProvider) ProvisionClusterNetwork(_ context.Context, clust
}

func (np *netopNetworkProvider) getDefaultClusterNetwork(ctx context.Context, clusterCtx *vmware.ClusterContext) (*netopv1.Network, error) {
-	log := ctrl.LoggerFrom(ctx)
-
	networkWithLabel, err := np.getDefaultClusterNetworkWithLabel(ctx, clusterCtx, CAPVDefaultNetworkLabel)
-	if networkWithLabel != nil && err == nil {
-		return networkWithLabel, nil
+	if err != nil {
+		return nil, err
	}

-	log.Info("falling back to legacy label to identify default network", "label", legacyDefaultNetworkLabel)
-	return np.getDefaultClusterNetworkWithLabel(ctx, clusterCtx, legacyDefaultNetworkLabel)
+	return networkWithLabel, nil
}

func (np *netopNetworkProvider) getDefaultClusterNetworkWithLabel(ctx context.Context, clusterCtx *vmware.ClusterContext, label string) (*netopv1.Network, error) {
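
Note: this is a behavior change as well as a logging cleanup: an error from the CAPVDefaultNetworkLabel lookup is now returned immediately instead of triggering a second lookup with legacyDefaultNetworkLabel. A sketch of the caller-visible difference (fragment, illustrative only):

// Before: a failed or empty CAPV-label lookup fell back to the legacy label.
// After: the CAPV label is the only path, so errors surface to the caller.
network, err := np.getDefaultClusterNetwork(ctx, clusterCtx)
if err != nil {
	return nil, err // no legacy-label retry anymore
}
_ = network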
8 changes: 0 additions & 8 deletions pkg/services/network/network_test.go
@@ -170,10 +170,6 @@ var _ = Describe("Network provider", func() {
Context("with new CAPV default network label", func() {
testWithLabelFunc(CAPVDefaultNetworkLabel)
})
-
-Context("with legacy default network label", func() {
-	testWithLabelFunc(legacyDefaultNetworkLabel)
-})
})

Context("with nsx-t network provider", func() {
@@ -512,10 +508,6 @@ var _ = Describe("Network provider", func() {
testWithLabelFunc(CAPVDefaultNetworkLabel)
})

Context("with legacy default network label", func() {
testWithLabelFunc(legacyDefaultNetworkLabel)
})

})
})

2 changes: 1 addition & 1 deletion pkg/services/vmoperator/control_plane_endpoint_test.go
@@ -217,9 +217,9 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() {
// A VirtualMachineService should be created and will wait for a VIP to be assigned
expectedAnnotations["netoperator.vmware.com/network-name"] = "dummy-network"
expectVMS = true
-	createDefaultNetwork(ctx, clusterCtx, c)
expectedConditions[0].Reason = vmwarev1.WaitingForLoadBalancerIPReason
expectedConditions[0].Message = waitingForVIPFailure
+	createDefaultNetwork(ctx, clusterCtx, c)
apiEndpoint, err = cpService.ReconcileControlPlaneEndpointService(ctx, clusterCtx, netOpProvider)
verifyOutput()

@@ -8,12 +8,10 @@ spec:
name: '${VSPHERE_DATACENTER}'
type: Datacenter
tagCategory: k8s-region
-# autoConfigure: true
zone:
name: '${VSPHERE_COMPUTE_CLUSTER}'
type: ComputeCluster
tagCategory: k8s-zone
-# autoConfigure: true
topology:
datacenter: '${VSPHERE_DATACENTER}'
# datastore is optional and should/can be set when only one compute cluster is set
@@ -28,4 +26,4 @@ spec:
server: '${VSPHERE_SERVER}'
failureDomain: "ownerreferences"
placementConstraint:
-resourcePool: '${VSPHERE_RESOURCE_POOL}'
\ No newline at end of file
+resourcePool: '${VSPHERE_RESOURCE_POOL}'
