Commit 9ff4b1e
Merge pull request #7026 from blackpiglet/6376_fix
Add HealthCheckNodePort deletion logic in Service restore
blackpiglet authored Oct 27, 2023
2 parents 1e0fc77 + a949180 commit 9ff4b1e
Showing 4 changed files with 239 additions and 8 deletions.
1 change: 1 addition & 0 deletions changelogs/unreleased/7026-blackpiglet
@@ -0,0 +1 @@
Add HealthCheckNodePort deletion logic for Service restore.
69 changes: 69 additions & 0 deletions pkg/restore/service_action.go
@@ -66,6 +66,9 @@ func (a *ServiceAction) Execute(input *velero.RestoreItemActionExecuteInput) (*v
if err := deleteNodePorts(service); err != nil {
return nil, err
}
if err := deleteHealthCheckNodePort(service); err != nil {
return nil, err
}
}

res, err := runtime.DefaultUnstructuredConverter.ToUnstructured(service)
@@ -76,6 +79,72 @@ func (a *ServiceAction) Execute(input *velero.RestoreItemActionExecuteInput) (*v
return velero.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: res}), nil
}

func deleteHealthCheckNodePort(service *corev1api.Service) error {
// Check the service type and the external traffic policy setting;
// if HealthCheckNodePort is not applicable, return early.
if service.Spec.ExternalTrafficPolicy != corev1api.ServiceExternalTrafficPolicyTypeLocal ||
service.Spec.Type != corev1api.ServiceTypeLoadBalancer {
return nil
}

// HealthCheckNodePort is already 0; nothing to delete.
if service.Spec.HealthCheckNodePort == 0 {
return nil
}

// Search for HealthCheckNodePort in the Service's last-applied-configuration
// annotation (HealthCheckNodePort is specified by the `kubectl apply` command).
lastAppliedConfig, ok := service.Annotations[annotationLastAppliedConfig]
if ok {
appliedServiceUnstructured := new(map[string]interface{})
if err := json.Unmarshal([]byte(lastAppliedConfig), appliedServiceUnstructured); err != nil {
return errors.WithStack(err)
}

healthCheckNodePort, exist, err := unstructured.NestedFloat64(*appliedServiceUnstructured, "spec", "healthCheckNodePort")
if err != nil {
return errors.WithStack(err)
}

// Found healthCheckNodePort in the lastAppliedConfig annotation,
// and the value is not 0. No need to delete; return.
if exist && healthCheckNodePort != 0 {
return nil
}
}

// Search for HealthCheckNodePort in ManagedFields (HealthCheckNodePort
// is specified by the `kubectl apply --server-side` command).
for _, entry := range service.GetManagedFields() {
if entry.FieldsV1 == nil {
continue
}
fields := new(map[string]interface{})
if err := json.Unmarshal(entry.FieldsV1.Raw, fields); err != nil {
return errors.WithStack(err)
}

_, exist, err := unstructured.NestedMap(*fields, "f:spec", "f:healthCheckNodePort")
if err != nil {
return errors.WithStack(err)
}
if !exist {
continue
}
// Because the entry in ManagedFields has the form `f:healthCheckNodePort: {}`,
// the value cannot be read; checking for existence is enough.
// Found healthCheckNodePort in ManagedFields.
// No need to delete; return.
return nil
}

// HealthCheckNodePort was not found in the annotation or in
// ManagedFields, which means it was auto assigned. Delete it.
service.Spec.HealthCheckNodePort = 0

return nil
}

func deleteNodePorts(service *corev1api.Service) error {
if service.Spec.Type == corev1api.ServiceTypeExternalName {
return nil
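As a minimal usage sketch (not part of this diff), the following shows the intended effect of the new `deleteHealthCheckNodePort` helper on a Service whose healthCheckNodePort was auto assigned, i.e. there is no last-applied-configuration annotation and no managedFields entry for it. The wrapper function name and the port value are illustrative assumptions, and the snippet assumes it sits alongside the helper in `pkg/restore`:

```go
package restore

import (
	corev1api "k8s.io/api/core/v1"
)

// clearAutoAssignedHealthCheckNodePort is an illustrative wrapper showing the
// expected behavior of deleteHealthCheckNodePort when the port was auto
// assigned: the Service has no last-applied-configuration annotation and no
// managedFields entry for healthCheckNodePort, so the helper resets the field
// to 0 and the API server allocates a fresh port on restore.
func clearAutoAssignedHealthCheckNodePort() (*corev1api.Service, error) {
	svc := &corev1api.Service{
		Spec: corev1api.ServiceSpec{
			Type:                  corev1api.ServiceTypeLoadBalancer,
			ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal,
			HealthCheckNodePort:   32100, // hypothetical auto-assigned value
		},
	}
	if err := deleteHealthCheckNodePort(svc); err != nil {
		return nil, err
	}
	// svc.Spec.HealthCheckNodePort is now 0.
	return svc, nil
}
```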
161 changes: 160 additions & 1 deletion pkg/restore/service_action_test.go
@@ -36,7 +36,8 @@ import (
func svcJSON(ports ...corev1api.ServicePort) string {
svc := corev1api.Service{
Spec: corev1api.ServiceSpec{
Ports: ports,
HealthCheckNodePort: 8080,
Ports: ports,
},
}

@@ -486,6 +487,164 @@ func TestServiceActionExecute(t *testing.T) {
},
},
},
{
name: "If PreserveNodePorts is True in restore spec then HealthCheckNodePort always preserved.",
obj: corev1api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "svc-1",
},
Spec: corev1api.ServiceSpec{
HealthCheckNodePort: 8080,
ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal,
Type: corev1api.ServiceTypeLoadBalancer,
Ports: []corev1api.ServicePort{
{
Name: "http",
Port: 80,
NodePort: 8080,
},
{
Name: "hepsiburada",
NodePort: 9025,
},
},
},
},
restore: builder.ForRestore(api.DefaultNamespace, "").PreserveNodePorts(true).Result(),
expectedRes: corev1api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "svc-1",
},
Spec: corev1api.ServiceSpec{
HealthCheckNodePort: 8080,
ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal,
Type: corev1api.ServiceTypeLoadBalancer,
Ports: []corev1api.ServicePort{
{
Name: "http",
Port: 80,
NodePort: 8080,
},
{
Name: "hepsiburada",
NodePort: 9025,
},
},
},
},
},
{
name: "If PreserveNodePorts is False in restore spec then HealthCheckNodePort should be cleaned.",
obj: corev1api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "svc-1",
},
Spec: corev1api.ServiceSpec{
HealthCheckNodePort: 8080,
ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal,
Type: corev1api.ServiceTypeLoadBalancer,
},
},
restore: builder.ForRestore(api.DefaultNamespace, "").PreserveNodePorts(false).Result(),
expectedRes: corev1api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "svc-1",
},
Spec: corev1api.ServiceSpec{
HealthCheckNodePort: 0,
ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal,
Type: corev1api.ServiceTypeLoadBalancer,
},
},
},
{
name: "If PreserveNodePorts is false in restore spec, but service is not expected, then HealthCheckNodePort should be kept.",
obj: corev1api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "svc-1",
},
Spec: corev1api.ServiceSpec{
HealthCheckNodePort: 8080,
ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeCluster,
Type: corev1api.ServiceTypeLoadBalancer,
},
},
restore: builder.ForRestore(api.DefaultNamespace, "").PreserveNodePorts(false).Result(),
expectedRes: corev1api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "svc-1",
},
Spec: corev1api.ServiceSpec{
HealthCheckNodePort: 8080,
ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeCluster,
Type: corev1api.ServiceTypeLoadBalancer,
},
},
},
{
name: "If PreserveNodePorts is false in restore spec, but HealthCheckNodePort can be found in Annotation, then it should be kept.",
obj: corev1api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "svc-1",
Annotations: map[string]string{annotationLastAppliedConfig: svcJSON()},
},
Spec: corev1api.ServiceSpec{
HealthCheckNodePort: 8080,
ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal,
Type: corev1api.ServiceTypeLoadBalancer,
},
},
restore: builder.ForRestore(api.DefaultNamespace, "").PreserveNodePorts(false).Result(),
expectedRes: corev1api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "svc-1",
Annotations: map[string]string{annotationLastAppliedConfig: svcJSON()},
},
Spec: corev1api.ServiceSpec{
HealthCheckNodePort: 8080,
ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal,
Type: corev1api.ServiceTypeLoadBalancer,
},
},
},
{
name: "If PreserveNodePorts is false in restore spec, but HealthCheckNodePort can be found in ManagedFields, then it should be kept.",
obj: corev1api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "svc-1",
ManagedFields: []metav1.ManagedFieldsEntry{
{
FieldsV1: &metav1.FieldsV1{
Raw: []byte(`{"f:spec":{"f:healthCheckNodePort":{}}}`),
},
},
},
},
Spec: corev1api.ServiceSpec{
HealthCheckNodePort: 8080,
ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal,
Type: corev1api.ServiceTypeLoadBalancer,
},
},
restore: builder.ForRestore(api.DefaultNamespace, "").PreserveNodePorts(false).Result(),
expectedRes: corev1api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "svc-1",
ManagedFields: []metav1.ManagedFieldsEntry{
{
FieldsV1: &metav1.FieldsV1{
Raw: []byte(`{"f:spec":{"f:healthCheckNodePort":{}}}`),
},
},
},
},
Spec: corev1api.ServiceSpec{
HealthCheckNodePort: 8080,
ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal,
Type: corev1api.ServiceTypeLoadBalancer,
},
},
},
}

for _, test := range tests {
16 changes: 9 additions & 7 deletions site/content/docs/main/restore-reference.md
@@ -280,21 +280,23 @@ There are two ways to delete a Restore object:
1. Deleting with `velero restore delete` will delete the Custom Resource representing the restore, along with its individual log and results files. It will not delete any objects that were created by the restore in your cluster.
2. Deleting with `kubectl -n velero delete restore` will delete the Custom Resource representing the restore. It will not delete restore log or results files from object storage, or any objects that were created during the restore in your cluster.

## What happens to NodePorts when restoring Services
## What happens to NodePorts and HealthCheckNodePort when restoring Services

During a restore, Velero deletes **Auto assigned** NodePorts by default and Services get new **auto assigned** nodePorts after restore.
During a restore, Velero deletes **auto assigned** NodePorts and the HealthCheckNodePort by default, and Services get new **auto assigned** nodePorts and a new healthCheckNodePort after restore.

Velero auto detects **explicitly specified** NodePorts using **`last-applied-config`** annotation and they are **preserved** after restore. NodePorts can be explicitly specified as `.spec.ports[*].nodePort` field on Service definition.
Velero auto detects **explicitly specified** NodePorts using the **`last-applied-config`** annotation and **`managedFields`**, and they are **preserved** after restore. NodePorts can be explicitly specified in the `.spec.ports[*].nodePort` field of the Service definition.

### Always Preserve NodePorts
Velero handles `HealthCheckNodePort` the same way as `NodePorts`.
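As a hedged illustration of that detection (a standalone sketch mirroring the helper added in `pkg/restore/service_action.go` above; the function name is hypothetical and not Velero's public API), explicit specification is inferred either from the `kubectl.kubernetes.io/last-applied-configuration` annotation written by `kubectl apply`, or from a `managedFields` marker of the form `f:spec: {f:healthCheckNodePort: {}}` written by `kubectl apply --server-side`:

```go
package example

import (
	"encoding/json"

	corev1api "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// healthCheckNodePortExplicitlySet reports whether a Service's
// healthCheckNodePort appears to have been set by the user rather than
// auto assigned, using the same two signals as the restore plugin.
func healthCheckNodePortExplicitlySet(svc *corev1api.Service) (bool, error) {
	// `kubectl apply` records the full spec in the last-applied-configuration
	// annotation, so an explicitly set healthCheckNodePort shows up there.
	if cfg, ok := svc.Annotations["kubectl.kubernetes.io/last-applied-configuration"]; ok {
		applied := map[string]interface{}{}
		if err := json.Unmarshal([]byte(cfg), &applied); err != nil {
			return false, err
		}
		port, found, err := unstructured.NestedFloat64(applied, "spec", "healthCheckNodePort")
		if err != nil {
			return false, err
		}
		if found && port != 0 {
			return true, nil
		}
	}

	// `kubectl apply --server-side` records field ownership in managedFields
	// as `f:spec: {f:healthCheckNodePort: {}}`; the value itself is not stored,
	// so presence of the key is the signal.
	for _, entry := range svc.GetManagedFields() {
		if entry.FieldsV1 == nil {
			continue
		}
		fields := map[string]interface{}{}
		if err := json.Unmarshal(entry.FieldsV1.Raw, &fields); err != nil {
			return false, err
		}
		if _, found, err := unstructured.NestedMap(fields, "f:spec", "f:healthCheckNodePort"); err != nil {
			return false, err
		} else if found {
			return true, nil
		}
	}

	return false, nil
}
```

If neither signal is present, the port is treated as auto assigned and cleared so the API server can allocate a new one on restore.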

It is not always possible to set nodePorts explicitly on some big clusters because of operational complexity. As the Kubernetes [NodePort documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) states, "if you want a specific port number, you can specify a value in the `nodePort` field. The control plane will either allocate you that port or report that the API transaction failed. This means that you need to take care of possible port collisions yourself. You also have to use a valid port number, one that's inside the range configured for NodePort use.""
### Always Preserve NodePorts and HealthCheckNodePort

It is not always possible to set nodePorts and healthCheckNodePort explicitly on some big clusters because of operational complexity. As the Kubernetes [NodePort documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) states, "if you want a specific port number, you can specify a value in the `nodePort` field. The control plane will either allocate you that port or report that the API transaction failed. This means that you need to take care of possible port collisions yourself. You also have to use a valid port number, one that's inside the range configured for NodePort use."

Clusters that do not explicitly specify nodePorts may still need to restore the original NodePorts in the event of a disaster. Auto assigned nodePorts are typically configured on load balancers located in front of the cluster, and updating all of these nodePorts on the load balancers after a disaster is another operational complexity you are responsible for if the nodePorts change.

Use the `velero restore create ` command's `--preserve-nodeports` flag to preserve Service nodePorts always, regardless of whether nodePorts are explicitly specified or not. This flag is used for preserving the original nodePorts from a backup and can be used as `--preserve-nodeports` or `--preserve-nodeports=true`. If this flag is present, Velero will not remove the nodePorts when restoring a Service, but will try to use the nodePorts from the backup.
Use the `velero restore create` command's `--preserve-nodeports` flag to always preserve Service nodePorts and healthCheckNodePort, regardless of whether they are explicitly specified or not. This flag preserves the original nodePorts and healthCheckNodePort from a backup and can be used as `--preserve-nodeports` or `--preserve-nodeports=true`. If this flag is present, Velero will not remove the nodePorts and healthCheckNodePort when restoring a Service, but will try to use the values from the backup.

Trying to preserve nodePorts may cause port conflicts when restoring on situations below:
Trying to preserve nodePorts and healthCheckNodePort may cause port conflicts when restoring in the situations below:

- If the nodePort from the backup is already allocated on the target cluster, then Velero prints an error log as shown below and continues the restore operation.

