diff --git a/.github/workflows/build-container-image.yml b/.github/workflows/build-container-image.yml index 29023b270..61c0dda0a 100644 --- a/.github/workflows/build-container-image.yml +++ b/.github/workflows/build-container-image.yml @@ -17,12 +17,12 @@ jobs: packages: write steps: - name: Check out repository - uses: actions/checkout@v3.1.0 + uses: actions/checkout@v4 - name: Install Go - uses: actions/setup-go@v3.4.0 + uses: actions/setup-go@v5 with: - go-version: "1.21.1" + go-version: '1.22' - name: Set up Docker Buildx id: docker-setup diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml index 1a6af13b0..07fb3da01 100644 --- a/.github/workflows/codespell.yml +++ b/.github/workflows/codespell.yml @@ -18,4 +18,4 @@ jobs: with: check_filenames: true skip: ./.git,./.github/workflows/codespell.yml,.git,*.png,*.jpg,*.svg,*.sum,./vendor,go.sum,crd-csi-snapshot-ga.yaml,crd-csi-snapshot.yaml - ignore_words_list: "AKS,aks,ro" + ignore_words_list: "AKS,aks,ro,accend" diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 31ec40d83..bfd821d2f 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -13,15 +13,14 @@ jobs: - name: Check out repository uses: actions/checkout@v4 - - name: Set up Go - uses: actions/setup-go@v4 + - name: Set up Go 1.x + uses: actions/setup-go@v5 with: - go-version: "1.21.1" + go-version: '1.22' id: go - name: Install Dependencies run: sudo apt-get update && sudo apt-get install -y libcryptsetup-dev - name: Build Test - run: | - go test -covermode=count -coverprofile=profile.cov ./pkg/... + run: go test -race -covermode=atomic -coverprofile=profile.cov ./pkg/... diff --git a/.github/workflows/static.yaml b/.github/workflows/static.yaml index 4f2ffadc1..97238ed19 100644 --- a/.github/workflows/static.yaml +++ b/.github/workflows/static.yaml @@ -11,9 +11,9 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Go 1.x - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: "1.21.1" + go-version: '1.22' - name: Checkout code uses: actions/checkout@v4 @@ -22,9 +22,6 @@ jobs: run: sudo apt-get update && sudo apt-get -y install libcryptsetup-dev - name: Run linter - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v4 with: - version: v1.51 - skip-pkg-cache: true - skip-build-cache: true - args: -E=gofmt,deadcode,unused,varcheck,ineffassign,revive,misspell,exportloopref,asciicheck,bodyclose,depguard,dogsled,durationcheck,errname,forbidigo -D=staticcheck --timeout=30m0s + version: v1.54 diff --git a/.github/workflows/trivy.yaml b/.github/workflows/trivy.yaml index 8c7f31e99..ae6ba985a 100644 --- a/.github/workflows/trivy.yaml +++ b/.github/workflows/trivy.yaml @@ -11,9 +11,9 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Go 1.x - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: "1.21.1" + go-version: '1.22' id: go - name: Checkout code diff --git a/.golangci.yml b/.golangci.yml index bf43d3e64..e8cd5ae38 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,9 +1,34 @@ run: - deadline: 10m + deadline: 30m linters: disable: - typecheck - structcheck + - staticcheck enable: - - golint + - gofmt + - unused + - ineffassign + - revive + - misspell + - exportloopref + - asciicheck + - bodyclose + - depguard + - dogsled + - durationcheck + - errname + - forbidigo +linters-settings: + depguard: + rules: + main: + files: + - $all + - "!$test" + allow: + - $gostd + - k8s.io + - sigs.k8s.io + - github.com diff --git a/Dockerfile b/Dockerfile 
index e0529fff9..5f6e5725a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,8 @@ -FROM debian:bullseye AS build +FROM debian:bookworm AS build ENV DEBIAN_FRONTEND="noninteractive" RUN apt-get update && apt-get install -y build-essential git wget pkg-config libcryptsetup12 libcryptsetup-dev -ARG GO_VER=1.21.1 +ARG GO_VER=1.22.4 RUN wget https://go.dev/dl/go${GO_VER}.linux-amd64.tar.gz && \ tar -C /usr/local -xzf go${GO_VER}.linux-amd64.tar.gz && \ rm go${GO_VER}.linux-amd64.tar.gz diff --git a/Makefile b/Makefile index 9ca716981..c5cdcb5b9 100644 --- a/Makefile +++ b/Makefile @@ -37,7 +37,7 @@ REGISTRY_NAME ?= $(shell echo $(REGISTRY) | sed "s/.azurecr.io//g") IMAGE_NAME ?= constellation/azure-csi-driver ifneq ($(BUILD_V2), true) PLUGIN_NAME = azurediskplugin -IMAGE_VERSION ?= v1.3.0 +IMAGE_VERSION ?= v1.4.0 CHART_VERSION ?= latest else PLUGIN_NAME = azurediskpluginv2 @@ -129,14 +129,6 @@ sanity-test: azuredisk sanity-test-v2: azuredisk-v2 go test -v -timeout=30m ./test/sanity --temp-use-driver-v2 -.PHONY: integration-test -integration-test: azuredisk - go test -v -timeout=30m ./test/integration - -.PHONY: integration-test-v2 -integration-test-v2: azuredisk-v2 - go test -v -timeout=30m ./test/integration --temp-use-driver-v2 - .PHONY: e2e-bootstrap e2e-bootstrap: install-helm ifdef WINDOWS_USE_HOST_PROCESS_CONTAINERS diff --git a/charts/README.md b/charts/README.md index 05eeda45e..4fd9c7230 100644 --- a/charts/README.md +++ b/charts/README.md @@ -71,7 +71,7 @@ helm repo update azuredisk-csi-driver ### install a specific version ```console -helm install azuredisk-csi-driver azuredisk-csi-driver/azuredisk-csi-driver --namespace kube-system --version v1.29.0 +helm install azuredisk-csi-driver azuredisk-csi-driver/azuredisk-csi-driver --namespace kube-system --version v1.29.2 ``` ### install on Azure Stack @@ -131,19 +131,19 @@ The following table lists the configurable parameters of the latest Azure Disk C | `image.azuredisk.tag` | azuredisk-csi-driver docker image tag | `` | | `image.azuredisk.pullPolicy` | azuredisk-csi-driver image pull policy | `IfNotPresent` | | `image.csiProvisioner.repository` | csi-provisioner docker image | `/oss/kubernetes-csi/csi-provisioner` | -| `image.csiProvisioner.tag` | csi-provisioner docker image tag | `v3.5.0` | +| `image.csiProvisioner.tag` | csi-provisioner docker image tag | `v4.0.0` | | `image.csiProvisioner.pullPolicy` | csi-provisioner image pull policy | `IfNotPresent` | | `image.csiAttacher.repository` | csi-attacher docker image | `/oss/kubernetes-csi/csi-attacher` | -| `image.csiAttacher.tag` | csi-attacher docker image tag | `v4.3.0` | +| `image.csiAttacher.tag` | csi-attacher docker image tag | `v4.5.0` | | `image.csiAttacher.pullPolicy` | csi-attacher image pull policy | `IfNotPresent` | | `image.csiResizer.repository` | csi-resizer docker image | `/oss/kubernetes-csi/csi-resizer` | -| `image.csiResizer.tag` | csi-resizer docker image tag | `v1.8.0` | +| `image.csiResizer.tag` | csi-resizer docker image tag | `v1.9.3` | | `image.csiResizer.pullPolicy` | csi-resizer image pull policy | `IfNotPresent` | | `image.livenessProbe.repository` | liveness-probe docker image | `/oss/kubernetes-csi/livenessprobe` | -| `image.livenessProbe.tag` | liveness-probe docker image tag | `v2.10.0` | +| `image.livenessProbe.tag` | liveness-probe docker image tag | `v2.12.0` | | `image.livenessProbe.pullPolicy` | liveness-probe image pull policy | `IfNotPresent` | | `image.nodeDriverRegistrar.repository` | csi-node-driver-registrar docker image | 
`/oss/kubernetes-csi/csi-node-driver-registrar` | -| `image.nodeDriverRegistrar.tag` | csi-node-driver-registrar docker image tag | `v2.8.0` | +| `image.nodeDriverRegistrar.tag` | csi-node-driver-registrar docker image tag | `v2.10.0` | | `image.nodeDriverRegistrar.pullPolicy` | csi-node-driver-registrar image pull policy | `IfNotPresent` | | `imagePullSecrets` | Specify docker-registry secret names as an array | [] (does not add image pull secrets to deployed pods) | | | `serviceAccount.create` | whether create service account of csi-azuredisk-controller, csi-azuredisk-node, and snapshot-controller| `true` | @@ -205,10 +205,10 @@ The following table lists the configurable parameters of the latest Azure Disk C | `node.logLevel` | node driver log level |`5` | | `snapshot.enabled` | whether enable snapshot feature | `false` | | `snapshot.image.csiSnapshotter.repository` | csi-snapshotter docker image | `/oss/kubernetes-csi/csi-snapshotter` | -| `snapshot.image.csiSnapshotter.tag` | csi-snapshotter docker image tag | `v6.2.2` | +| `snapshot.image.csiSnapshotter.tag` | csi-snapshotter docker image tag | `v6.3.3` | | `snapshot.image.csiSnapshotter.pullPolicy` | csi-snapshotter image pull policy | `IfNotPresent` | | `snapshot.image.csiSnapshotController.repository` | snapshot-controller docker image | `/oss/kubernetes-csi/snapshot-controller` | -| `snapshot.image.csiSnapshotController.tag` | snapshot-controller docker image tag | `v6.2.2` | +| `snapshot.image.csiSnapshotController.tag` | snapshot-controller docker image tag | `v6.3.3` | | `snapshot.image.csiSnapshotController.pullPolicy` | snapshot-controller image pull policy | `IfNotPresent` | | `snapshot.snapshotController.name` | snapshot controller name | `csi-snapshot-controller` | | `snapshot.snapshotController.replicas` | the replicas of snapshot-controller | `2` | diff --git a/charts/artifacthub-repo.yml b/charts/artifacthub-repo.yml new file mode 100644 index 000000000..1b4a754b4 --- /dev/null +++ b/charts/artifacthub-repo.yml @@ -0,0 +1,4 @@ +repositoryID: 80d41cba-2a79-4d87-a985-7a438993aff5 +owners: + - name: andyzhangx + email: xiazhang@microsoft.com diff --git a/charts/edgeless/Chart.yaml b/charts/edgeless/Chart.yaml index fbab5e66d..3ce5248a0 100644 --- a/charts/edgeless/Chart.yaml +++ b/charts/edgeless/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: "v1.3.0" +appVersion: "v1.4.0" description: Azure disk Container Storage Interface (CSI) Storage Plugin with on-node encryption support name: azuredisk-csi-driver -version: v1.3.0 +version: v1.4.0 diff --git a/charts/edgeless/templates/csi-azuredisk-controller.yaml b/charts/edgeless/templates/csi-azuredisk-controller.yaml index f7ac6f2db..94a7cfd41 100644 --- a/charts/edgeless/templates/csi-azuredisk-controller.yaml +++ b/charts/edgeless/templates/csi-azuredisk-controller.yaml @@ -120,6 +120,7 @@ spec: - "-leader-election" - "--leader-election-namespace={{ .Release.Namespace }}" - "-v=2" + - "--timeout=1200s" env: - name: ADDRESS value: /csi/csi.sock @@ -157,7 +158,7 @@ spec: args: - --csi-address=/csi/csi.sock - --probe-timeout=3s - - --health-port={{ .Values.controller.livenessProbe.healthPort }} + - --http-endpoint=localhost:{{ .Values.controller.livenessProbe.healthPort }} - --v=2 volumeMounts: - name: socket-dir @@ -197,18 +198,20 @@ spec: - "--enable-traffic-manager={{ .Values.controller.enableTrafficManager }}" - "--traffic-manager-port={{ .Values.controller.trafficManagerPort }}" - "--enable-otel-tracing={{ .Values.controller.otelTracing.enabled }}" + - 
"--check-disk-lun-collision=true" + {{- range $value := .Values.controller.extraArgs }} + - {{ $value | quote }} + {{- end }} ports: - - containerPort: {{ .Values.controller.livenessProbe.healthPort }} - name: healthz - protocol: TCP - containerPort: {{ .Values.controller.metricsPort }} name: metrics protocol: TCP livenessProbe: failureThreshold: 5 httpGet: + host: localhost path: /healthz - port: healthz + port: {{ .Values.controller.livenessProbe.healthPort }} initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 diff --git a/charts/edgeless/templates/csi-azuredisk-node.yaml b/charts/edgeless/templates/csi-azuredisk-node.yaml index 50d3b795c..78048e046 100644 --- a/charts/edgeless/templates/csi-azuredisk-node.yaml +++ b/charts/edgeless/templates/csi-azuredisk-node.yaml @@ -74,7 +74,7 @@ spec: args: - --csi-address=/csi/csi.sock - --probe-timeout=3s - - --health-port={{ .Values.node.livenessProbe.healthPort }} + - --http-endpoint=localhost:{{ .Values.node.livenessProbe.healthPort }} - --v=2 resources: {{- toYaml .Values.linux.resources.livenessProbe | nindent 12 }} - name: node-driver-registrar @@ -131,15 +131,12 @@ spec: - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" - "--enable-otel-tracing={{ .Values.linux.otelTracing.enabled }}" - "--kms-addr={{ .Values.global.keyServiceName }}.{{ .Values.global.keyServiceNamespace | default .Release.Namespace }}:{{ .Values.global.keyServicePort }}" - ports: - - containerPort: {{ .Values.node.livenessProbe.healthPort }} - name: healthz - protocol: TCP livenessProbe: failureThreshold: 5 httpGet: + host: localhost path: /healthz - port: healthz + port: {{ .Values.node.livenessProbe.healthPort }} initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 diff --git a/charts/edgeless/values.yaml b/charts/edgeless/values.yaml index 944663770..c1ac7ca54 100644 --- a/charts/edgeless/values.yaml +++ b/charts/edgeless/values.yaml @@ -2,27 +2,27 @@ image: baseRepo: mcr.microsoft.com azuredisk: repository: ghcr.io/edgelesssys/constellation/azure-csi-driver - tag: v1.3.0@sha256:1e798f066ef78c293c4c87a31677f8948be4c8709980135969b73a9d7a46ca71 + tag: v1.4.0 #@sha256:1e798f066ef78c293c4c87a31677f8948be4c8709980135969b73a9d7a46ca71 pullPolicy: IfNotPresent csiProvisioner: repository: /oss/kubernetes-csi/csi-provisioner - tag: v3.5.0@sha256:fdf70099aa1538d1c2164976cf6d158ef8b3a5ee63db10bf0085de4ec66f59b4 + tag: v4.0.0@sha256:beadfb2cfa02f8bbb2efd88261a673023527cf51ebe7894daef82c4d928264a5 pullPolicy: IfNotPresent csiAttacher: repository: /oss/kubernetes-csi/csi-attacher - tag: v4.3.0@sha256:4306b80bfe8caea3fe53f6d1c15807c745be3072553ff508fc4f61da8f4a0c10 + tag: v4.5.0@sha256:172a9140780701b2223b7296729fc6cc3be8c86d0cfd2d0452e495f5ea28f51f pullPolicy: IfNotPresent csiResizer: repository: /oss/kubernetes-csi/csi-resizer - tag: v1.8.0@sha256:6f0e8c9f3d0bdcf7a5fb5e404276ffac624033099d7687c8080692bcb6d13cd1 + tag: v1.9.3@sha256:e20dc798f529436d2c861dd66bc7fcfa17623b562a2a65474aab38fb77c9824a pullPolicy: IfNotPresent livenessProbe: repository: /oss/kubernetes-csi/livenessprobe - tag: v2.10.0@sha256:3aeac313cffdb7db80b733539427f2533a3f662bf538e7b6434b0f898ceb701b + tag: v2.12.0@sha256:c762188c45d1b9bc9144b694b85313d5e49c741935a81d5b94fd7db978a40ae1 pullPolicy: IfNotPresent nodeDriverRegistrar: repository: /oss/kubernetes-csi/csi-node-driver-registrar - tag: v2.8.0@sha256:af6bf1b5ff310d4dc02cf8276be9b06014318f7ee31238b5fa278febd1a10ca9 + tag: v2.10.0@sha256:136e3a4a5897f111d1dedd404a5717ee7ff2f215e5fe878abdf4ce00c2292280 pullPolicy: IfNotPresent 
serviceAccount: @@ -140,11 +140,11 @@ snapshot: image: csiSnapshotter: repository: /oss/kubernetes-csi/csi-snapshotter - tag: v6.2.2 + tag: v6.3.3 pullPolicy: IfNotPresent csiSnapshotController: repository: /oss/kubernetes-csi/snapshot-controller - tag: v6.2.2 + tag: v6.3.3 pullPolicy: IfNotPresent snapshotController: name: csi-snapshot-controller diff --git a/charts/index.yaml b/charts/index.yaml index b2a211891..c95897a3f 100644 --- a/charts/index.yaml +++ b/charts/index.yaml @@ -3,7 +3,7 @@ entries: azuredisk-csi-driver: - apiVersion: v1 appVersion: latest-v2 - created: "2023-09-13T13:15:19.045813764Z" + created: "2024-04-12T02:56:56.093841169Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: b261d370dfb8bde4d15d6285d57c87369d9c66d12b92c8c6ad2b85495365759a name: azuredisk-csi-driver @@ -12,7 +12,7 @@ entries: version: v2.0.0-beta.7 - apiVersion: v1 appVersion: v2.0.0-beta.6 - created: "2023-09-13T13:15:19.099540801Z" + created: "2024-04-12T02:56:56.149696888Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 0e7280d2bcd752668c4439078ffdfc4567ac97af97d8baa07c322b99c34fb741 name: azuredisk-csi-driver @@ -21,7 +21,7 @@ entries: version: v2.0.0-beta.6 - apiVersion: v1 appVersion: v2.0.0-beta.5 - created: "2023-09-13T13:15:19.097202452Z" + created: "2024-04-12T02:56:56.148072197Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 10189dd78863d24c9572c468e0d81b8781f83625118e6b205ee65e157aab0602 name: azuredisk-csi-driver @@ -30,7 +30,7 @@ entries: version: v2.0.0-beta.5 - apiVersion: v1 appVersion: v2.0.0-beta.4 - created: "2023-09-13T13:15:19.095602901Z" + created: "2024-04-12T02:56:56.146592145Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: b38901738bb600ff55cc91e1efb45c84a3a601bb815908a348df793960cd7122 name: azuredisk-csi-driver @@ -39,7 +39,7 @@ entries: version: v2.0.0-beta.4 - apiVersion: v1 appVersion: v2.0.0-beta.3 - created: "2023-09-13T13:15:19.09398455Z" + created: "2024-04-12T02:56:56.144405926Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: ddfe774c7b2326ff61c71a7e6fbfe687e97f06b6a8cc5e87c0b50ac0f320c7fe name: azuredisk-csi-driver @@ -48,7 +48,7 @@ entries: version: v2.0.0-beta.3 - apiVersion: v1 appVersion: v2.0.0-beta.2 - created: "2023-09-13T13:15:19.092639901Z" + created: "2024-04-12T02:56:56.143149441Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 7c1b8159465e7642b7460ec8e4e2a2965be11e8abb8310d86d9487676adb6adc name: azuredisk-csi-driver @@ -57,7 +57,7 @@ entries: version: v2.0.0-beta.2 - apiVersion: v1 appVersion: v2.0.0-beta.1 - created: "2023-09-13T13:15:19.090077437Z" + created: "2024-04-12T02:56:56.141877259Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: f327e1205e7197cf87693012848e2fd76a213c7852ac57a50753ba9e41207612 name: azuredisk-csi-driver @@ -66,7 +66,7 @@ entries: version: v2.0.0-beta.1 - apiVersion: v1 appVersion: v2.0.0-alpha.1 - created: "2023-09-13T13:15:19.088585564Z" + created: "2024-04-12T02:56:56.140620825Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: a00216ca8bdd4550f01071c0a57e7cf49f20b4915164ec6c785d2b5e51893870 name: azuredisk-csi-driver @@ -74,26 +74,80 @@ entries: - https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts/v2.0.0-alpha.1/azuredisk-csi-driver-v2.0.0-alpha.1.tgz version: v2.0.0-alpha.1 - apiVersion: v1 - appVersion: v1.29.0 - created: 
"2023-09-13T13:15:19.043617472Z" + appVersion: v1.30.1 + created: "2024-04-12T02:56:56.092114098Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin - digest: 5d196d8b3c4ba7da7ff9bfaa712191c322a34a4af68682a717438fce3f490b5c + digest: e4464d1307aa8ad436b52f8647bb0ef74de214c2481515d1bcd3f2b6618de19f name: azuredisk-csi-driver urls: - - https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts/latest/azuredisk-csi-driver-v1.29.0.tgz - version: v1.29.0 + - https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts/latest/azuredisk-csi-driver-v1.30.1.tgz + version: v1.30.1 + - apiVersion: v1 + appVersion: v1.30.1 + created: "2024-04-12T02:56:56.132334224Z" + description: Azure disk Container Storage Interface (CSI) Storage Plugin + digest: e4464d1307aa8ad436b52f8647bb0ef74de214c2481515d1bcd3f2b6618de19f + name: azuredisk-csi-driver + urls: + - https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts/v1.30.1/azuredisk-csi-driver-v1.30.1.tgz + version: v1.30.1 + - apiVersion: v1 + appVersion: v1.30.0 + created: "2024-04-12T02:56:56.131162795Z" + description: Azure disk Container Storage Interface (CSI) Storage Plugin + digest: e76dc3ae89d3040cf078ca3232577d29e100813670ca5138a297bc3b2e1c5514 + name: azuredisk-csi-driver + urls: + - https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts/v1.30.0/azuredisk-csi-driver-v1.30.0.tgz + version: v1.30.0 + - apiVersion: v1 + appVersion: v1.29.2 + created: "2024-04-12T02:56:56.129427512Z" + description: Azure disk Container Storage Interface (CSI) Storage Plugin + digest: ae0c15b875bb0bd2a4c1135437f0abfee6bf3591abcc502604317b3fe46015ff + name: azuredisk-csi-driver + urls: + - https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts/v1.29.2/azuredisk-csi-driver-v1.29.2.tgz + version: v1.29.2 + - apiVersion: v1 + appVersion: v1.29.1 + created: "2024-04-12T02:56:56.127837606Z" + description: Azure disk Container Storage Interface (CSI) Storage Plugin + digest: 441c64c6487a89efda91f22aa40358b6f9d31c73e2f131cfc10dcddd628af54f + name: azuredisk-csi-driver + urls: + - https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts/v1.29.1/azuredisk-csi-driver-v1.29.1.tgz + version: v1.29.1 - apiVersion: v1 appVersion: v1.29.0 - created: "2023-09-13T13:15:19.078044871Z" + created: "2024-04-12T02:56:56.126328094Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 611857811af7ac3304f7cae847be4f35bad6735482700d5b2c970dfbb53f9a3b name: azuredisk-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts/v1.29.0/azuredisk-csi-driver-v1.29.0.tgz version: v1.29.0 + - apiVersion: v1 + appVersion: v1.28.5 + created: "2024-04-12T02:56:56.125183331Z" + description: Azure disk Container Storage Interface (CSI) Storage Plugin + digest: 1c8d6bb8882ddc62b699f877734f637291f755436474b4daeb80b395a9d5cdf1 + name: azuredisk-csi-driver + urls: + - https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts/v1.28.5/azuredisk-csi-driver-v1.28.5.tgz + version: v1.28.5 + - apiVersion: v1 + appVersion: v1.28.4 + created: "2024-04-12T02:56:56.124075531Z" + description: Azure disk Container Storage Interface (CSI) Storage Plugin + digest: f1b8fa60b0769577c563e33d475544577b37bb2430d027bfe92b91ad8588744d + name: azuredisk-csi-driver + urls: + - 
https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts/v1.28.4/azuredisk-csi-driver-v1.28.4.tgz + version: v1.28.4 - apiVersion: v1 appVersion: v1.28.3 - created: "2023-09-13T13:15:19.076886009Z" + created: "2024-04-12T02:56:56.122923452Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 12fe80d19091cc4bc0025585da35d12bcab9cbc2f3e4cfd90eb4368e6967931a name: azuredisk-csi-driver @@ -102,7 +156,7 @@ entries: version: v1.28.3 - apiVersion: v1 appVersion: v1.28.2 - created: "2023-09-13T13:15:19.075490208Z" + created: "2024-04-12T02:56:56.120825107Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 044dc6ffb662914020a3a835f994455a3301a4b2b665c9d4b9496b422a93c8b1 name: azuredisk-csi-driver @@ -111,16 +165,34 @@ entries: version: v1.28.2 - apiVersion: v1 appVersion: v1.27.1 - created: "2023-09-13T13:15:19.07320679Z" + created: "2024-04-12T02:56:56.119715181Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 805d2f7437e7442739c18791e3eaf839d75bc9f3c1a10710e6cc6cb7ee06cbc4 name: azuredisk-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts/v1.27.1/azuredisk-csi-driver-v1.27.1.tgz version: v1.27.1 + - apiVersion: v1 + appVersion: v1.26.8 + created: "2024-04-12T02:56:56.118594477Z" + description: Azure disk Container Storage Interface (CSI) Storage Plugin + digest: 3920ea0e2c358bc659b4bc243ad35420959d59c7532ebcf775de6c819ff0aca6 + name: azuredisk-csi-driver + urls: + - https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts/v1.26.8/azuredisk-csi-driver-v1.26.8.tgz + version: v1.26.8 + - apiVersion: v1 + appVersion: v1.26.7 + created: "2024-04-12T02:56:56.117562536Z" + description: Azure disk Container Storage Interface (CSI) Storage Plugin + digest: 8796e69d622aaef119bf87047d1d7ed699e88119d48655590813ec5a38a3ccab + name: azuredisk-csi-driver + urls: + - https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts/v1.26.7/azuredisk-csi-driver-v1.26.7.tgz + version: v1.26.7 - apiVersion: v1 appVersion: v1.26.6 - created: "2023-09-13T13:15:19.071903563Z" + created: "2024-04-12T02:56:56.116433899Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 277051eba7872783d34d9b8ba7bd606f3161afe571523eaabfa8b400491010c9 name: azuredisk-csi-driver @@ -129,7 +201,7 @@ entries: version: v1.26.6 - apiVersion: v1 appVersion: v1.25.0 - created: "2023-09-13T13:15:19.070613888Z" + created: "2024-04-12T02:56:56.114408713Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: cc1a78eb68d741382945252fdc86dc38fc903bcc7448eef5753a1e663e4e6e3c name: azuredisk-csi-driver @@ -138,7 +210,7 @@ entries: version: v1.25.0 - apiVersion: v1 appVersion: v1.24.0 - created: "2023-09-13T13:15:19.06936172Z" + created: "2024-04-12T02:56:56.113383544Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 8f4a2048cd3d8ef128e533c43c5b68464989e28949658010599d466b2ccb1b58 name: azuredisk-csi-driver @@ -147,7 +219,7 @@ entries: version: v1.24.0 - apiVersion: v1 appVersion: v1.23.0 - created: "2023-09-13T13:15:19.068106847Z" + created: "2024-04-12T02:56:56.112346351Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 4347271a297c6c95e10c4a880a08453c22839c95ad7db232d2d8fc38e926b385 name: azuredisk-csi-driver @@ -156,7 +228,7 @@ entries: version: v1.23.0 - apiVersion: v1 appVersion: v1.22.0 - created: 
"2023-09-13T13:15:19.066874283Z" + created: "2024-04-12T02:56:56.111321723Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: fc7adc9ddb406356bee117d3ba5c360408b4ee9a7decc30d34fe310d0cdd6aef name: azuredisk-csi-driver @@ -165,7 +237,7 @@ entries: version: v1.22.0 - apiVersion: v1 appVersion: v1.21.0 - created: "2023-09-13T13:15:19.064747957Z" + created: "2024-04-12T02:56:56.110307977Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 50a5c0860aa631c7e83affbaeac1a84c43c96b19175e1dd1c21cba472564798d name: azuredisk-csi-driver @@ -174,7 +246,7 @@ entries: version: v1.21.0 - apiVersion: v1 appVersion: v1.20.0 - created: "2023-09-13T13:15:19.063715037Z" + created: "2024-04-12T02:56:56.109028769Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 9f2a936be5efc45002c12d5bbbd8cb453e51a0c1668f6ceae51df656ac190c95 name: azuredisk-csi-driver @@ -183,7 +255,7 @@ entries: version: v1.20.0 - apiVersion: v1 appVersion: v1.19.0 - created: "2023-09-13T13:15:19.061864017Z" + created: "2024-04-12T02:56:56.107093022Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 6c044a33b4adf598b9810e8839f04a16b1214470eceeb61bbe48076552955296 name: azuredisk-csi-driver @@ -192,7 +264,7 @@ entries: version: v1.19.0 - apiVersion: v1 appVersion: v1.18.0 - created: "2023-09-13T13:15:19.060650727Z" + created: "2024-04-12T02:56:56.106120041Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 79474616c7373ed65bbf75050748fda16ae2f6b290d240813006abd74e733628 name: azuredisk-csi-driver @@ -201,7 +273,7 @@ entries: version: v1.18.0 - apiVersion: v1 appVersion: v1.17.0 - created: "2023-09-13T13:15:19.059628452Z" + created: "2024-04-12T02:56:56.105163574Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: dcaad8438f57941c19f2269d9d4510591529f4de00353d12506fd277dc06378e name: azuredisk-csi-driver @@ -210,7 +282,7 @@ entries: version: v1.17.0 - apiVersion: v1 appVersion: v1.16.0 - created: "2023-09-13T13:15:19.058354887Z" + created: "2024-04-12T02:56:56.104186689Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 95108dce3a4da8aad2729ff8bc1587906b917925e6fe2e5935b5d25805080fb4 name: azuredisk-csi-driver @@ -219,7 +291,7 @@ entries: version: v1.16.0 - apiVersion: v1 appVersion: v1.15.0 - created: "2023-09-13T13:15:19.056117939Z" + created: "2024-04-12T02:56:56.102498757Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 3da85b9206af81dc4217a3304bf14839158112300de7dbb95841e30daaf5e4e5 name: azuredisk-csi-driver @@ -228,7 +300,7 @@ entries: version: v1.15.0 - apiVersion: v1 appVersion: v1.14.0 - created: "2023-09-13T13:15:19.055062659Z" + created: "2024-04-12T02:56:56.101167272Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 89e87dead7767f8abd56096e436956b23a666802040f7aad91d5b6c36af8b098 name: azuredisk-csi-driver @@ -237,7 +309,7 @@ entries: version: v1.14.0 - apiVersion: v1 appVersion: v1.13.0 - created: "2023-09-13T13:15:19.053869527Z" + created: "2024-04-12T02:56:56.100231839Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: c109533218e0625e9ca33c2db5e73c4b550b88943f7950b33e02a344a8ff9f8c name: azuredisk-csi-driver @@ -246,7 +318,7 @@ entries: version: v1.13.0 - apiVersion: v1 appVersion: v1.12.0 - created: "2023-09-13T13:15:19.052680819Z" + created: "2024-04-12T02:56:56.099293218Z" description: Azure disk Container 
Storage Interface (CSI) Storage Plugin digest: e41f089e5216fc9263039310990d1e41bcb468a85879289af1834dea0c457aaa name: azuredisk-csi-driver @@ -255,7 +327,7 @@ entries: version: v1.12.0 - apiVersion: v1 appVersion: v1.11.0 - created: "2023-09-13T13:15:19.051690722Z" + created: "2024-04-12T02:56:56.098345252Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: f35447c256488b1942342e9fcf8b6bed5f47b4ae5fb0fdae037110c5926803fe name: azuredisk-csi-driver @@ -264,7 +336,7 @@ entries: version: v1.11.0 - apiVersion: v1 appVersion: v1.10.0 - created: "2023-09-13T13:15:19.050442896Z" + created: "2024-04-12T02:56:56.097264365Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 12bd0e1f20a2e2adcb04b6954423438d9ece7a2ad0fb4b4fe2969844d14cfb86 name: azuredisk-csi-driver @@ -273,7 +345,7 @@ entries: version: v1.10.0 - apiVersion: v1 appVersion: v1.9.0 - created: "2023-09-13T13:15:19.087242404Z" + created: "2024-04-12T02:56:56.138551192Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: d917427661ca4ec13add77efcfe31d2c7c989e20b1427dd566a05ad1bdd98fa9 name: azuredisk-csi-driver @@ -282,7 +354,7 @@ entries: version: v1.9.0 - apiVersion: v1 appVersion: v1.8.0 - created: "2023-09-13T13:15:19.086013835Z" + created: "2024-04-12T02:56:56.137542551Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: a37491e845671e81b9567f2d1a746dbf4f182fde0c8f9e689dfa632c5651c6c9 name: azuredisk-csi-driver @@ -291,7 +363,7 @@ entries: version: v1.8.0 - apiVersion: v1 appVersion: v1.7.0 - created: "2023-09-13T13:15:19.084984648Z" + created: "2024-04-12T02:56:56.136770605Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 4df7a10466206c60deab00efbca57f67f042afd1ecd5daca738df28526f3ffcc name: azuredisk-csi-driver @@ -300,7 +372,7 @@ entries: version: v1.7.0 - apiVersion: v1 appVersion: v1.6.0 - created: "2023-09-13T13:15:19.083819154Z" + created: "2024-04-12T02:56:56.136042088Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: b1a4c384fdfdb6577dd6dfd3709fb746b5c79247846e5c582b93c3a1187f6d88 name: azuredisk-csi-driver @@ -309,7 +381,7 @@ entries: version: v1.6.0 - apiVersion: v1 appVersion: v1.5.1 - created: "2023-09-13T13:15:19.081843834Z" + created: "2024-04-12T02:56:56.135288079Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: efb42e1d56cc5596eb4e695b658c2748bc05cc6c47862e3f20ea6b5b199d01d6 name: azuredisk-csi-driver @@ -318,7 +390,7 @@ entries: version: v1.5.1 - apiVersion: v1 appVersion: v1.5.0 - created: "2023-09-13T13:15:19.080883343Z" + created: "2024-04-12T02:56:56.134078637Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: c8490efde8d4a43bd25f5c619887498dfb4b41edf3929ac8dbf0a9d3f691e6ae name: azuredisk-csi-driver @@ -327,7 +399,7 @@ entries: version: v1.5.0 - apiVersion: v1 appVersion: v1.4.0 - created: "2023-09-13T13:15:19.080107582Z" + created: "2024-04-12T02:56:56.133029572Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 55e5e17aac7c144b21a198867654ef70ad272987b0bb165782905adf91ca3c03 name: azuredisk-csi-driver @@ -336,7 +408,7 @@ entries: version: v1.4.0 - apiVersion: v1 appVersion: v1.3.0 - created: "2023-09-13T13:15:19.078816506Z" + created: "2024-04-12T02:56:56.129997686Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 2665483e922a577feb8539ca7f774bc70c945ce490294fd3378f098c2d244dde name: 
azuredisk-csi-driver @@ -345,7 +417,7 @@ entries: version: v1.3.0 - apiVersion: v1 appVersion: v1.2.0 - created: "2023-09-13T13:15:19.062536167Z" + created: "2024-04-12T02:56:56.107605678Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 2bbfe2f9d080f1b3ff10590c7168d05ce026c5a73332b4d48014610a52337808 name: azuredisk-csi-driver @@ -354,7 +426,7 @@ entries: version: v1.2.0 - apiVersion: v1 appVersion: v1.1.1 - created: "2023-09-13T13:15:19.04844493Z" + created: "2024-04-12T02:56:56.095429434Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: dd7066be8f499f6c1a396ab27c0013c09f5a8d8319cc04fbdd480d31107bb851 name: azuredisk-csi-driver @@ -363,7 +435,7 @@ entries: version: v1.1.1 - apiVersion: v1 appVersion: v1.1.0 - created: "2023-09-13T13:15:19.04770152Z" + created: "2024-04-12T02:56:56.094889143Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: 3d2a5189416dd6a43bd3e2097bbe23a8db347b6e1a36c6a43fd59cc9c9633ff3 name: azuredisk-csi-driver @@ -372,11 +444,11 @@ entries: version: v1.1.0 - apiVersion: v1 appVersion: v1.0.0 - created: "2023-09-13T13:15:19.046966216Z" + created: "2024-04-12T02:56:56.094379661Z" description: Azure disk Container Storage Interface (CSI) Storage Plugin digest: d179bc6f338518859b6efdc3b3bed8d06513313e8047563eb4b654b2d417c81e name: azuredisk-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts/v1.0.0/azuredisk-csi-driver-v1.0.0.tgz version: v1.0.0 -generated: "2023-09-13T13:15:19.042282197Z" +generated: "2024-04-12T02:56:56.090696034Z" diff --git a/charts/latest/azuredisk-csi-driver-v1.29.0.tgz b/charts/latest/azuredisk-csi-driver-v1.29.0.tgz deleted file mode 100644 index e2234d847..000000000 Binary files a/charts/latest/azuredisk-csi-driver-v1.29.0.tgz and /dev/null differ diff --git a/charts/latest/azuredisk-csi-driver-v1.30.1.tgz b/charts/latest/azuredisk-csi-driver-v1.30.1.tgz new file mode 100644 index 000000000..b4f170736 Binary files /dev/null and b/charts/latest/azuredisk-csi-driver-v1.30.1.tgz differ diff --git a/charts/latest/azuredisk-csi-driver/Chart.yaml b/charts/latest/azuredisk-csi-driver/Chart.yaml index 862a675c6..d25a27c52 100644 --- a/charts/latest/azuredisk-csi-driver/Chart.yaml +++ b/charts/latest/azuredisk-csi-driver/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 -appVersion: v1.29.0 +appVersion: v1.30.1 description: Azure disk Container Storage Interface (CSI) Storage Plugin name: azuredisk-csi-driver -version: v1.29.0 +version: v1.30.1 diff --git a/charts/latest/azuredisk-csi-driver/templates/NOTES.txt b/charts/latest/azuredisk-csi-driver/templates/NOTES.txt index 6a9695dad..c198152ef 100644 --- a/charts/latest/azuredisk-csi-driver/templates/NOTES.txt +++ b/charts/latest/azuredisk-csi-driver/templates/NOTES.txt @@ -2,4 +2,4 @@ The Azure Disk CSI Driver is getting deployed to your cluster. 
To check Azure Disk CSI Driver pods status, please run: - kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch + kubectl --namespace={{ .Release.Namespace }} get pods --selector="app.kubernetes.io/name={{ .Release.Name }}" --watch diff --git a/charts/latest/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml b/charts/latest/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml index 9dcb1adfa..45e5f2f53 100644 --- a/charts/latest/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml +++ b/charts/latest/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml @@ -120,6 +120,7 @@ spec: - "-leader-election" - "--leader-election-namespace={{ .Release.Namespace }}" - "-v=2" + - "--timeout=1200s" env: - name: ADDRESS value: /csi/csi.sock @@ -157,7 +158,7 @@ spec: args: - --csi-address=/csi/csi.sock - --probe-timeout=3s - - --health-port={{ .Values.controller.livenessProbe.healthPort }} + - --http-endpoint=localhost:{{ .Values.controller.livenessProbe.healthPort }} - --v=2 volumeMounts: - name: socket-dir @@ -197,18 +198,20 @@ spec: - "--enable-traffic-manager={{ .Values.controller.enableTrafficManager }}" - "--traffic-manager-port={{ .Values.controller.trafficManagerPort }}" - "--enable-otel-tracing={{ .Values.controller.otelTracing.enabled }}" + - "--check-disk-lun-collision=true" + {{- range $value := .Values.controller.extraArgs }} + - {{ $value | quote }} + {{- end }} ports: - - containerPort: {{ .Values.controller.livenessProbe.healthPort }} - name: healthz - protocol: TCP - containerPort: {{ .Values.controller.metricsPort }} name: metrics protocol: TCP livenessProbe: failureThreshold: 5 httpGet: + host: localhost path: /healthz - port: healthz + port: {{ .Values.controller.livenessProbe.healthPort }} initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 diff --git a/charts/latest/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml b/charts/latest/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml index e9cab38ce..dea8d3e55 100644 --- a/charts/latest/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml +++ b/charts/latest/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml @@ -75,7 +75,7 @@ spec: args: - --csi-address=/csi/csi.sock - --probe-timeout=3s - - --health-port={{ .Values.node.livenessProbe.healthPort }} + - --http-endpoint=localhost:{{ .Values.node.livenessProbe.healthPort }} - --v=2 resources: {{- toYaml .Values.linux.resources.livenessProbe | nindent 12 }} - name: node-driver-registrar @@ -131,15 +131,12 @@ spec: - "--get-node-info-from-labels={{ .Values.linux.getNodeInfoFromLabels }}" - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" - "--enable-otel-tracing={{ .Values.linux.otelTracing.enabled }}" - ports: - - containerPort: {{ .Values.node.livenessProbe.healthPort }} - name: healthz - protocol: TCP livenessProbe: failureThreshold: 5 httpGet: + host: localhost path: /healthz - port: healthz + port: {{ .Values.node.livenessProbe.healthPort }} initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 diff --git a/charts/latest/azuredisk-csi-driver/values.yaml b/charts/latest/azuredisk-csi-driver/values.yaml index 112f2ab49..62c02a754 100644 --- a/charts/latest/azuredisk-csi-driver/values.yaml +++ b/charts/latest/azuredisk-csi-driver/values.yaml @@ -2,27 +2,27 @@ image: baseRepo: mcr.microsoft.com azuredisk: repository: /oss/kubernetes-csi/azuredisk-csi - tag: v1.29.0 + tag: v1.30.1 pullPolicy: IfNotPresent csiProvisioner: repository: 
/oss/kubernetes-csi/csi-provisioner - tag: v3.5.0 + tag: v4.0.0 pullPolicy: IfNotPresent csiAttacher: repository: /oss/kubernetes-csi/csi-attacher - tag: v4.3.0 + tag: v4.5.0 pullPolicy: IfNotPresent csiResizer: repository: /oss/kubernetes-csi/csi-resizer - tag: v1.8.0 + tag: v1.9.3 pullPolicy: IfNotPresent livenessProbe: repository: /oss/kubernetes-csi/livenessprobe - tag: v2.10.0 + tag: v2.12.0 pullPolicy: IfNotPresent nodeDriverRegistrar: repository: /oss/kubernetes-csi/csi-node-driver-registrar - tag: v2.8.0 + tag: v2.10.0 pullPolicy: IfNotPresent serviceAccount: @@ -54,6 +54,7 @@ controller: attacherWorkerThreads: 1000 vmssCacheTTLInSeconds: -1 logLevel: 5 + extraArgs: [] otelTracing: enabled: false otelServiceName: csi-azuredisk-controller @@ -130,11 +131,11 @@ snapshot: image: csiSnapshotter: repository: /oss/kubernetes-csi/csi-snapshotter - tag: v6.2.2 + tag: v6.3.3 pullPolicy: IfNotPresent csiSnapshotController: repository: /oss/kubernetes-csi/snapshot-controller - tag: v6.2.2 + tag: v6.3.3 pullPolicy: IfNotPresent snapshotController: name: csi-snapshot-controller diff --git a/charts/v1.26.7/azuredisk-csi-driver-v1.26.7.tgz b/charts/v1.26.7/azuredisk-csi-driver-v1.26.7.tgz new file mode 100644 index 000000000..c278d9959 Binary files /dev/null and b/charts/v1.26.7/azuredisk-csi-driver-v1.26.7.tgz differ diff --git a/charts/v1.26.7/azuredisk-csi-driver/Chart.yaml b/charts/v1.26.7/azuredisk-csi-driver/Chart.yaml new file mode 100644 index 000000000..38a713c43 --- /dev/null +++ b/charts/v1.26.7/azuredisk-csi-driver/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: v1.26.7 +description: Azure disk Container Storage Interface (CSI) Storage Plugin +name: azuredisk-csi-driver +version: v1.26.7 diff --git a/charts/v1.26.7/azuredisk-csi-driver/templates/NOTES.txt b/charts/v1.26.7/azuredisk-csi-driver/templates/NOTES.txt new file mode 100644 index 000000000..6a9695dad --- /dev/null +++ b/charts/v1.26.7/azuredisk-csi-driver/templates/NOTES.txt @@ -0,0 +1,5 @@ +The Azure Disk CSI Driver is getting deployed to your cluster. + +To check Azure Disk CSI Driver pods status, please run: + + kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch diff --git a/charts/v1.26.7/azuredisk-csi-driver/templates/_helpers.tpl b/charts/v1.26.7/azuredisk-csi-driver/templates/_helpers.tpl new file mode 100644 index 000000000..51fcc8a09 --- /dev/null +++ b/charts/v1.26.7/azuredisk-csi-driver/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* Expand the name of the chart.*/}} +{{- define "azuredisk.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* labels for helm resources */}} +{{- define "azuredisk.labels" -}} +labels: + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" + app.kubernetes.io/name: "{{ template "azuredisk.name" . }}" + app.kubernetes.io/version: "{{ .Chart.AppVersion }}" + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" +{{- end -}} + +{{/* pull secrets for containers */}} +{{- define "azuredisk.pullSecrets" -}} +{{- if .Values.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- end }} +{{- end -}} \ No newline at end of file diff --git a/charts/v1.26.7/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml b/charts/v1.26.7/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml new file mode 100644 index 000000000..b0e29453c --- /dev/null +++ b/charts/v1.26.7/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml @@ -0,0 +1,661 @@ +{{- if .Values.snapshot.enabled -}} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + shortNames: + - vs + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. 
If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurrs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. 
Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. 
If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurrs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. 
For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + shortNames: + - vsclass + - vsclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. 
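# A minimal sketch of how this class schema is consumed — a VolumeSnapshotClass for the Azure Disk
# CSI driver. The class name, driver name and the "incremental" parameter below are assumptions
# based on the driver's usual defaults, not part of this chart; substitute the values actually
# configured via .Values.driver.name and your snapshot parameters:
#
#   apiVersion: snapshot.storage.k8s.io/v1
#   kind: VolumeSnapshotClass
#   metadata:
#     name: csi-azuredisk-vsc          # hypothetical name
#   driver: disk.csi.azure.com         # must match the deployed driver name (.Values.driver.name)
#   deletionPolicy: Delete             # or Retain, to keep the physical snapshot after VS deletion
#   parameters:
#     incremental: "true"              # driver-specific and opaque to Kubernetes, per the schema above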
+ type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. 
+ type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + shortNames: + - vsc + - vscs + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. 
"Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. 
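# To illustrate the bidirectional binding required above, a pre-existing snapshot is typically
# imported with a VolumeSnapshotContent/VolumeSnapshot pair that point at each other. The object
# names, namespace, driver name and snapshotHandle below are placeholders, not values from this
# chart:
#
#   apiVersion: snapshot.storage.k8s.io/v1
#   kind: VolumeSnapshotContent
#   metadata:
#     name: imported-content
#   spec:
#     driver: disk.csi.azure.com                       # must match the installed driver name
#     deletionPolicy: Retain                           # MUST be set explicitly for pre-existing snapshots
#     source:
#       snapshotHandle: /subscriptions/.../snapshots/my-snapshot   # CSI "snapshot_id" placeholder
#     volumeSnapshotRef:
#       name: imported-snapshot                        # name/namespace of the VolumeSnapshot to bind
#       namespace: default
#   ---
#   apiVersion: snapshot.storage.k8s.io/v1
#   kind: VolumeSnapshot
#   metadata:
#     name: imported-snapshot
#     namespace: default
#   spec:
#     source:
#       volumeSnapshotContentName: imported-content    # points back, completing the binding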
+ type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. 
For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. 
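# The restoreSize constraint described above matters when the snapshot is consumed: a
# PersistentVolumeClaim restored from it must request at least that size. The names and the
# storage class below are placeholders rather than values defined in this chart:
#
#   apiVersion: v1
#   kind: PersistentVolumeClaim
#   metadata:
#     name: restored-pvc
#   spec:
#     storageClassName: managed-csi          # any StorageClass backed by this driver
#     dataSource:
#       name: imported-snapshot              # the VolumeSnapshot to restore from
#       kind: VolumeSnapshot
#       apiGroup: snapshot.storage.k8s.io
#     accessModes: ["ReadWriteOnce"]
#     resources:
#       requests:
#         storage: 10Gi                      # must not be smaller than status.restoreSize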
+ type: string + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +{{- end -}} diff --git a/charts/v1.26.7/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml b/charts/v1.26.7/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml new file mode 100644 index 000000000..bfde3bdf0 --- /dev/null +++ b/charts/v1.26.7/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml @@ -0,0 +1,260 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.controller.name }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.controller.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.controller.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.controller.replicas }} + selector: + matchLabels: + app: {{ .Values.controller.name }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.controller.name }} +{{- with .Values.controller.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + serviceAccountName: {{ .Values.serviceAccount.controller }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.controller.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + {{- if .Values.controller.runOnMaster}} + node-role.kubernetes.io/master: "" + {{- end}} + {{- if .Values.controller.runOnControlPlane}} + node-role.kubernetes.io/control-plane: "" + {{- end}} + priorityClassName: system-cluster-critical +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: csi-provisioner +{{- if hasPrefix "/" .Values.image.csiProvisioner.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- else }} + image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- end }} + args: + - "--feature-gates=Topology=true" + - "--csi-address=$(ADDRESS)" + - "--v=2" + - "--timeout=30s" + - "--leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "--worker-threads={{ .Values.controller.provisionerWorkerThreads }}" + - "--extra-create-metadata=true" + - "--strict-topology=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} + - name: csi-attacher +{{- if hasPrefix "/" .Values.image.csiAttacher.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}" +{{- else }} + image: "{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}" +{{- end }} + args: + - "-v=2" + - "-csi-address=$(ADDRESS)" + - "-timeout=1200s" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "-worker-threads={{ .Values.controller.attacherWorkerThreads }}" + - "-kube-api-qps=50" + - "-kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiAttacher | nindent 12 }} + - name: csi-snapshotter +{{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotter.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}" +{{- else }} + image: "{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "-v=2" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiSnapshotter | nindent 12 }} + - name: csi-resizer +{{- if hasPrefix "/" .Values.image.csiResizer.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- else }} + image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - '-handle-volume-inuse-error=false' + - '-feature-gates=RecoverVolumeExpansionFailure=true' + - "-timeout=240s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} + - name: liveness-probe +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ 
.Values.controller.livenessProbe.healthPort }} + - --v=2 + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.controller.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}" + - "--disable-avset-nodes={{ .Values.controller.disableAvailabilitySetNodes }}" + - "--vm-type={{ .Values.controller.vmType }}" + - "--drivername={{ .Values.driver.name }}" + - "--cloud-config-secret-name={{ .Values.controller.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.controller.allowEmptyCloudConfig }}" + - "--vmss-cache-ttl-seconds={{ .Values.controller.vmssCacheTTLInSeconds }}" + ports: + - containerPort: {{ .Values.controller.livenessProbe.healthPort }} + name: healthz + protocol: TCP + - containerPort: {{ .Values.controller.metricsPort }} + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.controller.resources.azuredisk | nindent 12 }} + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} diff --git a/charts/v1.26.7/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml b/charts/v1.26.7/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml new file mode 100644 index 
000000000..21f941748 --- /dev/null +++ b/charts/v1.26.7/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml @@ -0,0 +1,13 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: {{ .Values.driver.name }} + annotations: + csiDriver: "{{ .Values.image.azuredisk.tag }}" + snapshot: "{{ .Values.snapshot.image.csiSnapshotter.tag }}" +spec: + attachRequired: true + podInfoOnMount: false + {{- if .Values.feature.enableFSGroupPolicy}} + fsGroupPolicy: File + {{- end}} diff --git a/charts/v1.26.7/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml b/charts/v1.26.7/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml new file mode 100644 index 000000000..e7696de03 --- /dev/null +++ b/charts/v1.26.7/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml @@ -0,0 +1,192 @@ +{{- if and (.Values.windows.enabled) (eq .Values.windows.useHostProcessContainers true) }} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.windows.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.windows.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.windows.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.windows.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.windows.dsName }} +{{- with .Values.windows.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.windows.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.node }} +{{- with .Values.windows.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + nodeSelector: + kubernetes.io/os: windows +{{- with .Values.windows.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.windows.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.windows.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + initContainers: + - name: init +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + command: + - "powershell.exe" + - "-c" + - "New-Item" + - "-ItemType" + - "Directory" + - "-Path" + - "C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\" + - "-Force" + containers: + - name: liveness-probe +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + command: + - "livenessprobe.exe" + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port={{ .Values.node.livenessProbe.healthPort }}" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + command: + - "csi-node-driver-registrar.exe" + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--plugin-registration-path=$(PLUGIN_REG_DIR)" + livenessProbe: + exec: + command: + - csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 60 + timeoutSeconds: 30 + env: + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + - name: PLUGIN_REG_DIR + value: C:\\var\\lib\\kubelet\\plugins_registry\\ + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + command: + - "azurediskplugin.exe" + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--metrics-address=0.0.0.0:{{ .Values.node.metricsPort }}" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - 
"--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: C:\k\azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} +{{- end -}} diff --git a/charts/v1.26.7/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml b/charts/v1.26.7/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml new file mode 100644 index 000000000..29e47252c --- /dev/null +++ b/charts/v1.26.7/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml @@ -0,0 +1,233 @@ +{{- if and (.Values.windows.enabled) (ne .Values.windows.useHostProcessContainers true) }} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.windows.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.windows.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.windows.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.windows.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.windows.dsName }} +{{- with .Values.windows.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.windows.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.node }} +{{- with .Values.windows.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + nodeSelector: + kubernetes.io/os: windows +{{- with .Values.windows.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.windows.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.windows.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: liveness-probe + volumeMounts: + - mountPath: C:\csi + name: plugin-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port={{ .Values.node.livenessProbe.healthPort }}" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} + resources: {{- toYaml .Values.windows.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 60 + timeoutSeconds: 30 + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }} + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: registration-dir + mountPath: C:\registration + resources: {{- toYaml .Values.windows.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--metrics-address=0.0.0.0:{{ .Values.node.metricsPort }}" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--get-node-info-from-labels={{ .Values.windows.getNodeInfoFromLabels }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: 
unix://C:\\csi\\csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: C:\k\azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: azure-config + mountPath: C:\k + - name: csi-proxy-fs-pipe-v1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + mountPath: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + mountPath: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-volume-v1beta2 + resources: {{- toYaml .Values.windows.resources.azuredisk | nindent 12 }} + volumes: + - name: csi-proxy-fs-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1beta2 + - name: registration-dir + hostPath: + path: {{ .Values.windows.kubelet }}\plugins_registry\ + type: Directory + - name: kubelet-dir + hostPath: + path: {{ .Values.windows.kubelet }}\ + type: Directory + - name: plugin-dir + hostPath: + path: {{ .Values.windows.kubelet }}\plugins\{{ .Values.driver.name }}\ + type: DirectoryOrCreate + - name: azure-config + hostPath: + path: C:\k + type: DirectoryOrCreate +{{- end -}} diff --git a/charts/v1.26.7/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml b/charts/v1.26.7/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml new file mode 100644 index 000000000..556202012 --- /dev/null +++ b/charts/v1.26.7/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml @@ -0,0 +1,242 @@ +{{- if .Values.linux.enabled}} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.linux.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.linux.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.linux.annotations }} + annotations: +{{ . 
| toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.linux.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.linux.dsName }} +{{- with .Values.linux.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.linux.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.linux.hostNetwork }} + dnsPolicy: Default + serviceAccountName: {{ .Values.serviceAccount.node }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.linux.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.linux.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.linux.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical +{{- with .Values.linux.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . | indent 6 }} + containers: + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ .Values.node.livenessProbe.healthPort }} + - --v=2 + resources: {{- toYaml .Values.linux.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: {{- toYaml .Values.linux.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--metrics-address=0.0.0.0:{{ .Values.node.metricsPort }}" + - "--enable-perf-optimization={{ .Values.linux.enablePerfOptimization }}" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ 
.Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--get-node-info-from-labels={{ .Values.linux.getNodeInfoFromLabels }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: {{ .Values.linux.kubelet }}/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /dev + name: device-dir + - mountPath: /sys/bus/scsi/devices + name: sys-devices-dir + - mountPath: /sys/class/ + name: sys-class + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.linux.resources.azuredisk | nindent 12 }} + volumes: + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }} + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: /dev + type: Directory + name: device-dir + - hostPath: + path: /sys/bus/scsi/devices + type: Directory + name: sys-devices-dir + - hostPath: + path: /sys/class/ + type: Directory + name: sys-class + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} +{{- end -}} diff --git a/charts/v1.26.7/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml b/charts/v1.26.7/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml new file mode 100644 index 
000000000..66f512fd8 --- /dev/null +++ b/charts/v1.26.7/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml @@ -0,0 +1,58 @@ +{{- if .Values.snapshot.enabled -}} +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.snapshot.snapshotController.name }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.snapshot.snapshotController.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.snapshot.snapshotController.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.snapshot.snapshotController.replicas }} + selector: + matchLabels: + app: {{ .Values.snapshot.snapshotController.name }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.snapshot.snapshotController.name }} +{{- with .Values.snapshot.snapshotController.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.snapshot.snapshotController.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.snapshotController }} + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . | indent 6 }} + containers: + - name: {{ .Values.snapshot.snapshotController.name }} +{{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotController.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.tag }}" +{{- else }} + image: "{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.tag }}" +{{- end }} + args: + - "--v=2" + - "--leader-election=true" + - "--leader-election-namespace={{ .Release.Namespace }}" + resources: {{- toYaml .Values.snapshot.snapshotController.resources | nindent 12 }} + imagePullPolicy: {{ .Values.snapshot.image.csiSnapshotController.pullPolicy }} +{{- end -}} diff --git a/charts/v1.26.7/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml b/charts/v1.26.7/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..b78940fc6 --- /dev/null +++ b/charts/v1.26.7/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml @@ -0,0 +1,199 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-provisioner-role +{{ include "azuredisk.labels" . 
| indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-provisioner-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-attacher-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-attacher-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-snapshotter-role +{{ include "azuredisk.labels" . 
| indent 2 }} +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-snapshotter-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-resizer-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-resizer-role +{{ include "azuredisk.labels" . 
| indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-controller-secret-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.26.7/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml b/charts/v1.26.7/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml new file mode 100644 index 000000000..ae7e080af --- /dev/null +++ b/charts/v1.26.7/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml @@ -0,0 +1,28 @@ +{{- if .Values.rbac.create -}} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-secret-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-node-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.26.7/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml b/charts/v1.26.7/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml new file mode 100644 index 000000000..8e4278b6f --- /dev/null +++ b/charts/v1.26.7/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml @@ -0,0 +1,72 @@ +{{- if and .Values.snapshot.enabled .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-binding +subjects: + - kind: 
ServiceAccount + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-role +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-leaderelection-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.26.7/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml b/charts/v1.26.7/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..08bd06cda --- /dev/null +++ b/charts/v1.26.7/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml @@ -0,0 +1,8 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- end -}} diff --git a/charts/v1.26.7/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml b/charts/v1.26.7/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml new file mode 100644 index 000000000..941f3eb92 --- /dev/null +++ b/charts/v1.26.7/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml @@ -0,0 +1,8 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . 
| indent 2 }} +{{- end -}} diff --git a/charts/v1.26.7/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml b/charts/v1.26.7/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml new file mode 100644 index 000000000..7cdaad0b4 --- /dev/null +++ b/charts/v1.26.7/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml @@ -0,0 +1,7 @@ +{{- if and .Values.snapshot.enabled .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/v1.26.7/azuredisk-csi-driver/values.yaml b/charts/v1.26.7/azuredisk-csi-driver/values.yaml new file mode 100644 index 000000000..0fbaaf413 --- /dev/null +++ b/charts/v1.26.7/azuredisk-csi-driver/values.yaml @@ -0,0 +1,256 @@ +image: + baseRepo: mcr.microsoft.com + azuredisk: + repository: /oss/kubernetes-csi/azuredisk-csi + tag: v1.26.7 + pullPolicy: IfNotPresent + csiProvisioner: + repository: /oss/kubernetes-csi/csi-provisioner + tag: v3.3.0 + pullPolicy: IfNotPresent + csiAttacher: + repository: /oss/kubernetes-csi/csi-attacher + tag: v4.0.0 + pullPolicy: IfNotPresent + csiResizer: + repository: /oss/kubernetes-csi/csi-resizer + tag: v1.6.0 + pullPolicy: IfNotPresent + livenessProbe: + repository: /oss/kubernetes-csi/livenessprobe + tag: v2.8.0 + pullPolicy: IfNotPresent + nodeDriverRegistrar: + repository: /oss/kubernetes-csi/csi-node-driver-registrar + tag: v2.6.2 + pullPolicy: IfNotPresent + +serviceAccount: + create: true # When true, service accounts will be created for you. Set to false if you want to use your own. + controller: csi-azuredisk-controller-sa # Name of Service Account to be created or used + node: csi-azuredisk-node-sa # Name of Service Account to be created or used + snapshotController: csi-snapshot-controller-sa # Name of Service Account to be created or used + +rbac: + create: true + name: azuredisk + +controller: + name: csi-azuredisk-controller + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + allowEmptyCloudConfig: false + replicas: 2 + metricsPort: 29604 + livenessProbe: + healthPort: 29602 + runOnMaster: false + runOnControlPlane: false + disableAvailabilitySetNodes: false + vmType: "" + provisionerWorkerThreads: 100 + attacherWorkerThreads: 500 + vmssCacheTTLInSeconds: -1 + logLevel: 5 + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + hostNetwork: true # this setting could be disabled if controller does not depend on MSI setting + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + resources: + csiProvisioner: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiAttacher: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiResizer: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiSnapshotter: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + azuredisk: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + +node: + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + supportZone: true + allowEmptyCloudConfig: true 
+ maxUnavailable: 1 + metricsPort: 29605 + logLevel: 5 + livenessProbe: + healthPort: 29603 + +snapshot: + enabled: false + name: csi-snapshot-controller + image: + csiSnapshotter: + repository: /oss/kubernetes-csi/csi-snapshotter + tag: v5.0.1 + pullPolicy: IfNotPresent + csiSnapshotController: + repository: /oss/kubernetes-csi/snapshot-controller + tag: v5.0.1 + pullPolicy: IfNotPresent + snapshotController: + name: csi-snapshot-controller + replicas: 2 + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + +feature: + enableFSGroupPolicy: true + +driver: + name: disk.csi.azure.com + # maximum number of attachable volumes per node, + # maximum number is defined according to node instance type by default(-1) + volumeAttachLimit: -1 + customUserAgent: "" + userAgentSuffix: "OSS-helm" + azureGoSDKLogLevel: "" # available values: ""(no logs), DEBUG, INFO, WARNING, ERROR + httpsProxy: "" + httpProxy: "" + noProxy: "" + +linux: + enabled: true + dsName: csi-azuredisk-node # daemonset name + kubelet: /var/lib/kubelet + distro: debian # available values: debian, fedora + enablePerfOptimization: true + tolerations: + - operator: "Exists" + hostNetwork: true # this setting could be disabled if perfProfile is `none` + getNodeInfoFromLabels: false # get node info from node labels instead of IMDS + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + resources: + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + nodeDriverRegistrar: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + azuredisk: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + +windows: + enabled: true + dsName: csi-azuredisk-node-win # daemonset name + kubelet: 'C:\var\lib\kubelet' + getNodeInfoFromLabels: false # get node info from node labels instead of IMDS + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + resources: + livenessProbe: + limits: + memory: 150Mi + requests: + cpu: 10m + memory: 40Mi + nodeDriverRegistrar: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + azuredisk: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi + useHostProcessContainers: false + +cloud: AzurePublicCloud + +## Reference to one or more secrets to be used when pulling images +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +## +imagePullSecrets: [] +# - name: "image-pull-secret" diff --git a/charts/v1.26.8/azuredisk-csi-driver-v1.26.8.tgz b/charts/v1.26.8/azuredisk-csi-driver-v1.26.8.tgz new file mode 100644 index 000000000..1b57d04ba Binary files /dev/null and b/charts/v1.26.8/azuredisk-csi-driver-v1.26.8.tgz differ diff --git a/charts/v1.26.8/azuredisk-csi-driver/Chart.yaml b/charts/v1.26.8/azuredisk-csi-driver/Chart.yaml new file mode 100644 index 000000000..c22919f31 --- /dev/null +++ b/charts/v1.26.8/azuredisk-csi-driver/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: v1.26.8 
+description: Azure disk Container Storage Interface (CSI) Storage Plugin +name: azuredisk-csi-driver +version: v1.26.8 diff --git a/charts/v1.26.8/azuredisk-csi-driver/templates/NOTES.txt b/charts/v1.26.8/azuredisk-csi-driver/templates/NOTES.txt new file mode 100644 index 000000000..6a9695dad --- /dev/null +++ b/charts/v1.26.8/azuredisk-csi-driver/templates/NOTES.txt @@ -0,0 +1,5 @@ +The Azure Disk CSI Driver is getting deployed to your cluster. + +To check Azure Disk CSI Driver pods status, please run: + + kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch diff --git a/charts/v1.26.8/azuredisk-csi-driver/templates/_helpers.tpl b/charts/v1.26.8/azuredisk-csi-driver/templates/_helpers.tpl new file mode 100644 index 000000000..51fcc8a09 --- /dev/null +++ b/charts/v1.26.8/azuredisk-csi-driver/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* Expand the name of the chart.*/}} +{{- define "azuredisk.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* labels for helm resources */}} +{{- define "azuredisk.labels" -}} +labels: + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" + app.kubernetes.io/name: "{{ template "azuredisk.name" . }}" + app.kubernetes.io/version: "{{ .Chart.AppVersion }}" + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" +{{- end -}} + +{{/* pull secrets for containers */}} +{{- define "azuredisk.pullSecrets" -}} +{{- if .Values.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- end }} +{{- end -}} \ No newline at end of file diff --git a/charts/v1.26.8/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml b/charts/v1.26.8/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml new file mode 100644 index 000000000..b0e29453c --- /dev/null +++ b/charts/v1.26.8/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml @@ -0,0 +1,661 @@ +{{- if .Values.snapshot.enabled -}} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + shortNames: + - vs + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. 
+ jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exist, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' 
+ type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers (i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. 
If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. 
This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exist, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers (i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during the snapshot creation. Upon success, this error field will be cleared. 
+ properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + shortNames: + - vsclass + - vsclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. 
+ type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + shortNames: + - vsc + - vscs + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). 
This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. 
When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. 
+ properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. 
+ format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +{{- end -}} diff --git a/charts/v1.26.8/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml b/charts/v1.26.8/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml new file mode 100644 index 000000000..bfde3bdf0 --- /dev/null +++ b/charts/v1.26.8/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml @@ -0,0 +1,260 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.controller.name }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.controller.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.controller.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.controller.replicas }} + selector: + matchLabels: + app: {{ .Values.controller.name }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.controller.name }} +{{- with .Values.controller.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + serviceAccountName: {{ .Values.serviceAccount.controller }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.controller.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + {{- if .Values.controller.runOnMaster}} + node-role.kubernetes.io/master: "" + {{- end}} + {{- if .Values.controller.runOnControlPlane}} + node-role.kubernetes.io/control-plane: "" + {{- end}} + priorityClassName: system-cluster-critical +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: csi-provisioner +{{- if hasPrefix "/" .Values.image.csiProvisioner.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- else }} + image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- end }} + args: + - "--feature-gates=Topology=true" + - "--csi-address=$(ADDRESS)" + - "--v=2" + - "--timeout=30s" + - "--leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "--worker-threads={{ .Values.controller.provisionerWorkerThreads }}" + - "--extra-create-metadata=true" + - "--strict-topology=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} + - name: csi-attacher +{{- if hasPrefix "/" .Values.image.csiAttacher.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}" +{{- else }} + image: "{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}" +{{- end }} + args: + - "-v=2" + - "-csi-address=$(ADDRESS)" + - "-timeout=1200s" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "-worker-threads={{ .Values.controller.attacherWorkerThreads }}" + - "-kube-api-qps=50" + - "-kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiAttacher | nindent 12 }} + - name: csi-snapshotter +{{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotter.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}" +{{- else }} + image: "{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "-v=2" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiSnapshotter | nindent 12 }} + - name: csi-resizer +{{- if hasPrefix "/" .Values.image.csiResizer.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- else }} + image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - '-handle-volume-inuse-error=false' + - '-feature-gates=RecoverVolumeExpansionFailure=true' + - "-timeout=240s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} + - name: liveness-probe +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ 
.Values.controller.livenessProbe.healthPort }} + - --v=2 + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.controller.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}" + - "--disable-avset-nodes={{ .Values.controller.disableAvailabilitySetNodes }}" + - "--vm-type={{ .Values.controller.vmType }}" + - "--drivername={{ .Values.driver.name }}" + - "--cloud-config-secret-name={{ .Values.controller.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.controller.allowEmptyCloudConfig }}" + - "--vmss-cache-ttl-seconds={{ .Values.controller.vmssCacheTTLInSeconds }}" + ports: + - containerPort: {{ .Values.controller.livenessProbe.healthPort }} + name: healthz + protocol: TCP + - containerPort: {{ .Values.controller.metricsPort }} + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.controller.resources.azuredisk | nindent 12 }} + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} diff --git a/charts/v1.26.8/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml b/charts/v1.26.8/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml new file mode 100644 index 
000000000..21f941748 --- /dev/null +++ b/charts/v1.26.8/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml @@ -0,0 +1,13 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: {{ .Values.driver.name }} + annotations: + csiDriver: "{{ .Values.image.azuredisk.tag }}" + snapshot: "{{ .Values.snapshot.image.csiSnapshotter.tag }}" +spec: + attachRequired: true + podInfoOnMount: false + {{- if .Values.feature.enableFSGroupPolicy}} + fsGroupPolicy: File + {{- end}} diff --git a/charts/v1.26.8/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml b/charts/v1.26.8/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml new file mode 100644 index 000000000..e7696de03 --- /dev/null +++ b/charts/v1.26.8/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml @@ -0,0 +1,192 @@ +{{- if and (.Values.windows.enabled) (eq .Values.windows.useHostProcessContainers true) }} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.windows.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.windows.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.windows.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.windows.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.windows.dsName }} +{{- with .Values.windows.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.windows.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.node }} +{{- with .Values.windows.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + nodeSelector: + kubernetes.io/os: windows +{{- with .Values.windows.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.windows.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.windows.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + initContainers: + - name: init +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + command: + - "powershell.exe" + - "-c" + - "New-Item" + - "-ItemType" + - "Directory" + - "-Path" + - "C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\" + - "-Force" + containers: + - name: liveness-probe +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + command: + - "livenessprobe.exe" + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port={{ .Values.node.livenessProbe.healthPort }}" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + command: + - "csi-node-driver-registrar.exe" + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--plugin-registration-path=$(PLUGIN_REG_DIR)" + livenessProbe: + exec: + command: + - csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 60 + timeoutSeconds: 30 + env: + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + - name: PLUGIN_REG_DIR + value: C:\\var\\lib\\kubelet\\plugins_registry\\ + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + command: + - "azurediskplugin.exe" + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--metrics-address=0.0.0.0:{{ .Values.node.metricsPort }}" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - 
"--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: C:\k\azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} +{{- end -}} diff --git a/charts/v1.26.8/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml b/charts/v1.26.8/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml new file mode 100644 index 000000000..29e47252c --- /dev/null +++ b/charts/v1.26.8/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml @@ -0,0 +1,233 @@ +{{- if and (.Values.windows.enabled) (ne .Values.windows.useHostProcessContainers true) }} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.windows.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.windows.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.windows.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.windows.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.windows.dsName }} +{{- with .Values.windows.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.windows.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.node }} +{{- with .Values.windows.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + nodeSelector: + kubernetes.io/os: windows +{{- with .Values.windows.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.windows.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.windows.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: liveness-probe + volumeMounts: + - mountPath: C:\csi + name: plugin-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port={{ .Values.node.livenessProbe.healthPort }}" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} + resources: {{- toYaml .Values.windows.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 60 + timeoutSeconds: 30 + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }} + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: registration-dir + mountPath: C:\registration + resources: {{- toYaml .Values.windows.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--metrics-address=0.0.0.0:{{ .Values.node.metricsPort }}" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--get-node-info-from-labels={{ .Values.windows.getNodeInfoFromLabels }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: 
unix://C:\\csi\\csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: C:\k\azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: azure-config + mountPath: C:\k + - name: csi-proxy-fs-pipe-v1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + mountPath: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + mountPath: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-volume-v1beta2 + resources: {{- toYaml .Values.windows.resources.azuredisk | nindent 12 }} + volumes: + - name: csi-proxy-fs-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1beta2 + - name: registration-dir + hostPath: + path: {{ .Values.windows.kubelet }}\plugins_registry\ + type: Directory + - name: kubelet-dir + hostPath: + path: {{ .Values.windows.kubelet }}\ + type: Directory + - name: plugin-dir + hostPath: + path: {{ .Values.windows.kubelet }}\plugins\{{ .Values.driver.name }}\ + type: DirectoryOrCreate + - name: azure-config + hostPath: + path: C:\k + type: DirectoryOrCreate +{{- end -}} diff --git a/charts/v1.26.8/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml b/charts/v1.26.8/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml new file mode 100644 index 000000000..556202012 --- /dev/null +++ b/charts/v1.26.8/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml @@ -0,0 +1,242 @@ +{{- if .Values.linux.enabled}} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.linux.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.linux.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.linux.annotations }} + annotations: +{{ . 
| toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.linux.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.linux.dsName }} +{{- with .Values.linux.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.linux.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.linux.hostNetwork }} + dnsPolicy: Default + serviceAccountName: {{ .Values.serviceAccount.node }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.linux.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.linux.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.linux.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical +{{- with .Values.linux.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . | indent 6 }} + containers: + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ .Values.node.livenessProbe.healthPort }} + - --v=2 + resources: {{- toYaml .Values.linux.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: {{- toYaml .Values.linux.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--metrics-address=0.0.0.0:{{ .Values.node.metricsPort }}" + - "--enable-perf-optimization={{ .Values.linux.enablePerfOptimization }}" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ 
.Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--get-node-info-from-labels={{ .Values.linux.getNodeInfoFromLabels }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: {{ .Values.linux.kubelet }}/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /dev + name: device-dir + - mountPath: /sys/bus/scsi/devices + name: sys-devices-dir + - mountPath: /sys/class/ + name: sys-class + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.linux.resources.azuredisk | nindent 12 }} + volumes: + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }} + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: /dev + type: Directory + name: device-dir + - hostPath: + path: /sys/bus/scsi/devices + type: Directory + name: sys-devices-dir + - hostPath: + path: /sys/class/ + type: Directory + name: sys-class + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} +{{- end -}} diff --git a/charts/v1.26.8/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml b/charts/v1.26.8/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml new file mode 100644 index 
000000000..66f512fd8 --- /dev/null +++ b/charts/v1.26.8/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml @@ -0,0 +1,58 @@ +{{- if .Values.snapshot.enabled -}} +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.snapshot.snapshotController.name }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.snapshot.snapshotController.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.snapshot.snapshotController.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.snapshot.snapshotController.replicas }} + selector: + matchLabels: + app: {{ .Values.snapshot.snapshotController.name }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.snapshot.snapshotController.name }} +{{- with .Values.snapshot.snapshotController.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.snapshot.snapshotController.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.snapshotController }} + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . | indent 6 }} + containers: + - name: {{ .Values.snapshot.snapshotController.name }} +{{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotController.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.tag }}" +{{- else }} + image: "{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.tag }}" +{{- end }} + args: + - "--v=2" + - "--leader-election=true" + - "--leader-election-namespace={{ .Release.Namespace }}" + resources: {{- toYaml .Values.snapshot.snapshotController.resources | nindent 12 }} + imagePullPolicy: {{ .Values.snapshot.image.csiSnapshotController.pullPolicy }} +{{- end -}} diff --git a/charts/v1.26.8/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml b/charts/v1.26.8/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..b78940fc6 --- /dev/null +++ b/charts/v1.26.8/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml @@ -0,0 +1,199 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-provisioner-role +{{ include "azuredisk.labels" . 
| indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-provisioner-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-attacher-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-attacher-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-snapshotter-role +{{ include "azuredisk.labels" . 
| indent 2 }} +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-snapshotter-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-resizer-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-resizer-role +{{ include "azuredisk.labels" . 
| indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-controller-secret-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.26.8/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml b/charts/v1.26.8/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml new file mode 100644 index 000000000..ae7e080af --- /dev/null +++ b/charts/v1.26.8/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml @@ -0,0 +1,28 @@ +{{- if .Values.rbac.create -}} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-secret-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-node-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.26.8/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml b/charts/v1.26.8/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml new file mode 100644 index 000000000..8e4278b6f --- /dev/null +++ b/charts/v1.26.8/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml @@ -0,0 +1,72 @@ +{{- if and .Values.snapshot.enabled .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-binding +subjects: + - kind: 
ServiceAccount + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-role +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-leaderelection-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.26.8/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml b/charts/v1.26.8/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..08bd06cda --- /dev/null +++ b/charts/v1.26.8/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml @@ -0,0 +1,8 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- end -}} diff --git a/charts/v1.26.8/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml b/charts/v1.26.8/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml new file mode 100644 index 000000000..941f3eb92 --- /dev/null +++ b/charts/v1.26.8/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml @@ -0,0 +1,8 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . 
| indent 2 }} +{{- end -}} diff --git a/charts/v1.26.8/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml b/charts/v1.26.8/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml new file mode 100644 index 000000000..7cdaad0b4 --- /dev/null +++ b/charts/v1.26.8/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml @@ -0,0 +1,7 @@ +{{- if and .Values.snapshot.enabled .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/v1.26.8/azuredisk-csi-driver/values.yaml b/charts/v1.26.8/azuredisk-csi-driver/values.yaml new file mode 100644 index 000000000..9e373cadd --- /dev/null +++ b/charts/v1.26.8/azuredisk-csi-driver/values.yaml @@ -0,0 +1,256 @@ +image: + baseRepo: mcr.microsoft.com + azuredisk: + repository: /oss/kubernetes-csi/azuredisk-csi + tag: v1.26.8 + pullPolicy: IfNotPresent + csiProvisioner: + repository: /oss/kubernetes-csi/csi-provisioner + tag: v3.3.0 + pullPolicy: IfNotPresent + csiAttacher: + repository: /oss/kubernetes-csi/csi-attacher + tag: v4.0.0 + pullPolicy: IfNotPresent + csiResizer: + repository: /oss/kubernetes-csi/csi-resizer + tag: v1.6.0 + pullPolicy: IfNotPresent + livenessProbe: + repository: /oss/kubernetes-csi/livenessprobe + tag: v2.8.0 + pullPolicy: IfNotPresent + nodeDriverRegistrar: + repository: /oss/kubernetes-csi/csi-node-driver-registrar + tag: v2.6.2 + pullPolicy: IfNotPresent + +serviceAccount: + create: true # When true, service accounts will be created for you. Set to false if you want to use your own. + controller: csi-azuredisk-controller-sa # Name of Service Account to be created or used + node: csi-azuredisk-node-sa # Name of Service Account to be created or used + snapshotController: csi-snapshot-controller-sa # Name of Service Account to be created or used + +rbac: + create: true + name: azuredisk + +controller: + name: csi-azuredisk-controller + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + allowEmptyCloudConfig: false + replicas: 2 + metricsPort: 29604 + livenessProbe: + healthPort: 29602 + runOnMaster: false + runOnControlPlane: false + disableAvailabilitySetNodes: false + vmType: "" + provisionerWorkerThreads: 100 + attacherWorkerThreads: 500 + vmssCacheTTLInSeconds: -1 + logLevel: 5 + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + hostNetwork: true # this setting could be disabled if controller does not depend on MSI setting + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + resources: + csiProvisioner: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiAttacher: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiResizer: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiSnapshotter: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + azuredisk: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + +node: + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + supportZone: true + allowEmptyCloudConfig: true 
+ maxUnavailable: 1 + metricsPort: 29605 + logLevel: 5 + livenessProbe: + healthPort: 29603 + +snapshot: + enabled: false + name: csi-snapshot-controller + image: + csiSnapshotter: + repository: /oss/kubernetes-csi/csi-snapshotter + tag: v5.0.1 + pullPolicy: IfNotPresent + csiSnapshotController: + repository: /oss/kubernetes-csi/snapshot-controller + tag: v5.0.1 + pullPolicy: IfNotPresent + snapshotController: + name: csi-snapshot-controller + replicas: 2 + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + +feature: + enableFSGroupPolicy: true + +driver: + name: disk.csi.azure.com + # maximum number of attachable volumes per node, + # maximum number is defined according to node instance type by default(-1) + volumeAttachLimit: -1 + customUserAgent: "" + userAgentSuffix: "OSS-helm" + azureGoSDKLogLevel: "" # available values: ""(no logs), DEBUG, INFO, WARNING, ERROR + httpsProxy: "" + httpProxy: "" + noProxy: "" + +linux: + enabled: true + dsName: csi-azuredisk-node # daemonset name + kubelet: /var/lib/kubelet + distro: debian # available values: debian, fedora + enablePerfOptimization: true + tolerations: + - operator: "Exists" + hostNetwork: true # this setting could be disabled if perfProfile is `none` + getNodeInfoFromLabels: false # get node info from node labels instead of IMDS + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + resources: + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + nodeDriverRegistrar: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + azuredisk: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + +windows: + enabled: true + dsName: csi-azuredisk-node-win # daemonset name + kubelet: 'C:\var\lib\kubelet' + getNodeInfoFromLabels: false # get node info from node labels instead of IMDS + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + resources: + livenessProbe: + limits: + memory: 150Mi + requests: + cpu: 10m + memory: 40Mi + nodeDriverRegistrar: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + azuredisk: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi + useHostProcessContainers: false + +cloud: AzurePublicCloud + +## Reference to one or more secrets to be used when pulling images +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +## +imagePullSecrets: [] +# - name: "image-pull-secret" diff --git a/charts/v1.28.4/azuredisk-csi-driver-v1.28.4.tgz b/charts/v1.28.4/azuredisk-csi-driver-v1.28.4.tgz new file mode 100644 index 000000000..266510ee2 Binary files /dev/null and b/charts/v1.28.4/azuredisk-csi-driver-v1.28.4.tgz differ diff --git a/charts/v1.28.4/azuredisk-csi-driver/Chart.yaml b/charts/v1.28.4/azuredisk-csi-driver/Chart.yaml new file mode 100644 index 000000000..7972a9e01 --- /dev/null +++ b/charts/v1.28.4/azuredisk-csi-driver/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: v1.28.4 
+description: Azure disk Container Storage Interface (CSI) Storage Plugin +name: azuredisk-csi-driver +version: v1.28.4 diff --git a/charts/v1.28.4/azuredisk-csi-driver/templates/NOTES.txt b/charts/v1.28.4/azuredisk-csi-driver/templates/NOTES.txt new file mode 100644 index 000000000..6a9695dad --- /dev/null +++ b/charts/v1.28.4/azuredisk-csi-driver/templates/NOTES.txt @@ -0,0 +1,5 @@ +The Azure Disk CSI Driver is getting deployed to your cluster. + +To check Azure Disk CSI Driver pods status, please run: + + kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch diff --git a/charts/v1.28.4/azuredisk-csi-driver/templates/_helpers.tpl b/charts/v1.28.4/azuredisk-csi-driver/templates/_helpers.tpl new file mode 100644 index 000000000..51fcc8a09 --- /dev/null +++ b/charts/v1.28.4/azuredisk-csi-driver/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* Expand the name of the chart.*/}} +{{- define "azuredisk.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* labels for helm resources */}} +{{- define "azuredisk.labels" -}} +labels: + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" + app.kubernetes.io/name: "{{ template "azuredisk.name" . }}" + app.kubernetes.io/version: "{{ .Chart.AppVersion }}" + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" +{{- end -}} + +{{/* pull secrets for containers */}} +{{- define "azuredisk.pullSecrets" -}} +{{- if .Values.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- end }} +{{- end -}} \ No newline at end of file diff --git a/charts/v1.28.4/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml b/charts/v1.28.4/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml new file mode 100644 index 000000000..76df8af7e --- /dev/null +++ b/charts/v1.28.4/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml @@ -0,0 +1,840 @@ +{{- if .Values.snapshot.enabled -}} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + shortNames: + - vs + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of + the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing + VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from + this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. 
+ jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot + object intends to bind to. Please note that verification of binding actually + requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure + both are pointing at each other. Binding MUST be verified prior to usage of + this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying + storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object representing the volume from which + a snapshot should be created. This PVC is assumed to be in the + same namespace as the VolumeSnapshot object. This field should + be set if the snapshot does not exists, and needs to be created. + This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a + pre-existing VolumeSnapshotContent object representing an existing + volume snapshot. This field should be set if the snapshot already + exists and only needs a representation in Kubernetes. This field + is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. VolumeSnapshotClassName may be + left nil to indicate that the default SnapshotClass should be used. + A given cluster may have multiple default Volume SnapshotClasses: + one default per CSI Driver. If a VolumeSnapshot does not specify + a SnapshotClass, VolumeSnapshotSource will be checked to figure + out what the associated CSI Driver is, and the default VolumeSnapshotClass + associated with that CSI Driver will be used. 
If more than one VolumeSnapshotClass + exist for a given CSI Driver and more than one have been marked + as default, CreateSnapshot will fail and generate an event. Empty + string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. + Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent + objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent + object to which this VolumeSnapshot object intends to bind to. If + not specified, it indicates that the VolumeSnapshot object has not + been successfully bound to a VolumeSnapshotContent object yet. NOTE: + To avoid possible security issues, consumers must verify binding + between VolumeSnapshot and VolumeSnapshotContent objects is successful + (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the snapshot controller + with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it may indicate + that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. The snapshot controller will keep retrying when an error + occurs during the snapshot creation. Upon success, this error field + will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the snapshot controller with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required + to create a volume from this snapshot. In dynamic snapshot creation + case, this field will be filled in by the snapshot controller with + the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. 
+ For a pre-existing snapshot, this field will be filled with the + "size_bytes" value returned from the CSI "ListSnapshots" gRPC call + if the driver supports it. When restoring a volume from this snapshot, + the size of the volume MUST NOT be smaller than the restoreSize + if it is specified, otherwise the restoration will fail. If not + specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. 
+ format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + shortNames: + - vsclass + - vsclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the + VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. 
A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent + created through the VolumeSnapshotClass should be deleted when its bound + VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". + "Retain" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. + Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". 
"Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: false + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + shortNames: + - vsc + - vscs + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical + snapshot on the underlying storage system should be deleted when its bound + VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on + the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. + "Delete" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are deleted. For dynamically provisioned + snapshots, this field will automatically be filled in by the CSI + snapshotter sidecar with the "DeletionPolicy" field defined in the + corresponding VolumeSnapshotClass. For pre-existing snapshots, users + MUST specify this field when creating the VolumeSnapshotContent + object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be + the same as the name returned by the CSI GetPluginName() call for + that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) + dynamically provisioned or already exists, and just requires a Kubernetes + object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of + a pre-existing snapshot on the underlying storage system for + which a Kubernetes object representation was (or should be) + created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the + volume from which a snapshot should be dynamically taken from. + This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + sourceVolumeMode: + description: SourceVolumeMode is the mode of the volume whose snapshot + is taken. Can be either “Filesystem” or “Block”. If not specified, + it indicates the source volume's mode is unknown. This field is + immutable. This field is an alpha field. + type: string + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot + was (or will be) created. Note that after provisioning, the VolumeSnapshotClass + may be deleted or recreated with different set of values, and as + such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object + to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be + provided for binding to happen. 
This field is immutable after creation. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the CSI snapshotter + sidecar with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it indicates + the creation time is unknown. The format of this field is a Unix + nanoseconds time encoded as an int64. On Unix, the command `date + +%s%N` returns the current time in nanoseconds since 1970-01-01 + 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, + if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the CSI snapshotter sidecar with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. 
For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be + filled in by the CSI snapshotter sidecar with the "size_bytes" value + returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "size_bytes" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. When restoring a volume from this snapshot, the size of the + volume MUST NOT be smaller than the restoreSize if it is specified, + otherwise the restoration will fail. If not specified, it indicates + that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot + on the underlying storage system. If not specified, it indicates + that dynamic snapshot creation has either failed or it is still + in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. 
In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +{{- end -}} diff --git a/charts/v1.28.4/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml b/charts/v1.28.4/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml new file mode 100644 index 000000000..517b8233f --- /dev/null +++ b/charts/v1.28.4/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml @@ -0,0 +1,281 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.controller.name }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.controller.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.controller.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.controller.replicas }} + selector: + matchLabels: + app: {{ .Values.controller.name }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.controller.name }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} +{{- with .Values.controller.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + serviceAccountName: {{ .Values.serviceAccount.controller }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.controller.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + {{- if .Values.controller.runOnMaster}} + node-role.kubernetes.io/master: "" + {{- end}} + {{- if .Values.controller.runOnControlPlane}} + node-role.kubernetes.io/control-plane: "" + {{- end}} + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: csi-provisioner +{{- if hasPrefix "/" .Values.image.csiProvisioner.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- else }} + image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- end }} + args: + - "--feature-gates=Topology=true" + - "--csi-address=$(ADDRESS)" + - "--v=2" + - "--timeout=30s" + - "--leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "--worker-threads={{ .Values.controller.provisionerWorkerThreads }}" + - "--extra-create-metadata=true" + - "--strict-topology=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} + - name: csi-attacher +{{- if hasPrefix "/" .Values.image.csiAttacher.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}" +{{- else }} + image: "{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}" +{{- end }} + args: + - "-v=2" + - "-csi-address=$(ADDRESS)" + - "-timeout=1200s" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "-worker-threads={{ .Values.controller.attacherWorkerThreads }}" + - "-kube-api-qps=200" + - "-kube-api-burst=400" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiAttacher | nindent 12 }} + - name: csi-snapshotter +{{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotter.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}" +{{- else }} + image: "{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "-v=2" + - "--timeout=1200s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiSnapshotter | nindent 12 }} + - name: csi-resizer +{{- if hasPrefix "/" .Values.image.csiResizer.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- else }} + image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - '-handle-volume-inuse-error=false' + - '-feature-gates=RecoverVolumeExpansionFailure=true' + - "-timeout=240s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} + - name: liveness-probe +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - 
--probe-timeout=3s + - --health-port={{ .Values.controller.livenessProbe.healthPort }} + - --v=2 + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }} +{{- if eq .Values.controller.enableTrafficManager true }} + - image: mcr.microsoft.com/aks/ccp/ccp-auto-thrust:master.221118.2 + imagePullPolicy: IfNotPresent + name: proxy + command: + - /ccp-auto-thrust + args: + - "--port={{ .Values.controller.trafficManagerPort }}" + ports: + - containerPort: {{ .Values.controller.trafficManagerPort }} + protocol: TCP +{{- end }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.controller.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}" + - "--disable-avset-nodes={{ .Values.controller.disableAvailabilitySetNodes }}" + - "--vm-type={{ .Values.controller.vmType }}" + - "--drivername={{ .Values.driver.name }}" + - "--cloud-config-secret-name={{ .Values.controller.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.controller.allowEmptyCloudConfig }}" + - "--vmss-cache-ttl-seconds={{ .Values.controller.vmssCacheTTLInSeconds }}" + - "--enable-traffic-manager={{ .Values.controller.enableTrafficManager }}" + - "--traffic-manager-port={{ .Values.controller.trafficManagerPort }}" + ports: + - containerPort: {{ .Values.controller.livenessProbe.healthPort }} + name: healthz + protocol: TCP + - containerPort: {{ .Values.controller.metricsPort }} + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.controller.resources.azuredisk | nindent 12 }} + volumes: + - name: socket-dir + emptyDir: {} + - name: 
azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} diff --git a/charts/v1.28.4/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml b/charts/v1.28.4/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml new file mode 100644 index 000000000..21f941748 --- /dev/null +++ b/charts/v1.28.4/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml @@ -0,0 +1,13 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: {{ .Values.driver.name }} + annotations: + csiDriver: "{{ .Values.image.azuredisk.tag }}" + snapshot: "{{ .Values.snapshot.image.csiSnapshotter.tag }}" +spec: + attachRequired: true + podInfoOnMount: false + {{- if .Values.feature.enableFSGroupPolicy}} + fsGroupPolicy: File + {{- end}} diff --git a/charts/v1.28.4/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml b/charts/v1.28.4/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml new file mode 100644 index 000000000..24de36157 --- /dev/null +++ b/charts/v1.28.4/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml @@ -0,0 +1,154 @@ +{{- if and (.Values.windows.enabled) (eq .Values.windows.useHostProcessContainers true) }} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.windows.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.windows.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.windows.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.windows.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.windows.dsName }} +{{- with .Values.windows.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.windows.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.node }} +{{- with .Values.windows.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + nodeSelector: + kubernetes.io/os: windows +{{- with .Values.windows.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.windows.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.windows.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + securityContext: + seccompProfile: + type: RuntimeDefault + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + initContainers: + - name: init +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + command: + - "powershell.exe" + - "-c" + - "New-Item -ItemType Directory -Path C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\ -Force" + containers: + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + command: + - "csi-node-driver-registrar.exe" + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--plugin-registration-path=$(PLUGIN_REG_DIR)" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://{{ .Values.windows.kubelet }}\plugins\{{ .Values.driver.name }}\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + - name: PLUGIN_REG_DIR + value: C:\\var\\lib\\kubelet\\plugins_registry\\ + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }} + resources: {{- toYaml .Values.windows.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- end }} + command: + - "azurediskplugin.exe" + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--enable-windows-host-process=true" + - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: 
KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: C:\k\azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} +{{- end -}} diff --git a/charts/v1.28.4/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml b/charts/v1.28.4/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml new file mode 100644 index 000000000..260a68c95 --- /dev/null +++ b/charts/v1.28.4/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml @@ -0,0 +1,238 @@ +{{- if and (.Values.windows.enabled) (ne .Values.windows.useHostProcessContainers true) }} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.windows.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.windows.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.windows.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.windows.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.windows.dsName }} +{{- with .Values.windows.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.windows.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.node }} +{{- with .Values.windows.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + nodeSelector: + kubernetes.io/os: windows +{{- with .Values.windows.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.windows.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.windows.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: liveness-probe + volumeMounts: + - mountPath: C:\csi + name: plugin-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port={{ .Values.node.livenessProbe.healthPort }}" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} + resources: {{- toYaml .Values.windows.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" +{{- if .Values.windows.enableRegistrationProbe }} + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 60 + timeoutSeconds: 30 +{{- end }} + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }} + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: registration-dir + mountPath: C:\registration + resources: {{- toYaml .Values.windows.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--get-node-info-from-labels={{ .Values.windows.getNodeInfoFromLabels }}" + - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + 
optional: true + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: C:\k\azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: azure-config + mountPath: C:\k + - name: csi-proxy-fs-pipe-v1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + mountPath: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + mountPath: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-volume-v1beta2 + resources: {{- toYaml .Values.windows.resources.azuredisk | nindent 12 }} + volumes: + - name: csi-proxy-fs-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1beta2 + - name: registration-dir + hostPath: + path: {{ .Values.windows.kubelet }}\plugins_registry\ + type: Directory + - name: kubelet-dir + hostPath: + path: {{ .Values.windows.kubelet }}\ + type: Directory + - name: plugin-dir + hostPath: + path: {{ .Values.windows.kubelet }}\plugins\{{ .Values.driver.name }}\ + type: DirectoryOrCreate + - name: azure-config + hostPath: + path: C:\k + type: DirectoryOrCreate +{{- end -}} diff --git a/charts/v1.28.4/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml b/charts/v1.28.4/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml new file mode 100644 index 000000000..86b829361 --- /dev/null +++ b/charts/v1.28.4/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml @@ -0,0 +1,250 @@ +{{- if .Values.linux.enabled}} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.linux.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.linux.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.linux.annotations }} + annotations: +{{ . 
| toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.linux.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.linux.dsName }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} +{{- with .Values.linux.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.linux.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.linux.hostNetwork }} + dnsPolicy: Default + serviceAccountName: {{ .Values.serviceAccount.node }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.linux.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.linux.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.linux.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.linux.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . | indent 6 }} + containers: + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ .Values.node.livenessProbe.healthPort }} + - --v=2 + resources: {{- toYaml .Values.linux.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 +{{- if .Values.linux.enableRegistrationProbe }} + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 +{{- end }} + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: {{- toYaml .Values.linux.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--enable-perf-optimization={{ 
.Values.linux.enablePerfOptimization }}" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--get-node-info-from-labels={{ .Values.linux.getNodeInfoFromLabels }}" + - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: {{ .Values.linux.kubelet }}/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /dev + name: device-dir + - mountPath: /sys/bus/scsi/devices + name: sys-devices-dir + - mountPath: /sys/class/ + name: sys-class + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.linux.resources.azuredisk | nindent 12 }} + volumes: + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }} + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: /dev + type: Directory + name: device-dir + - hostPath: + path: /sys/bus/scsi/devices + type: Directory + name: sys-devices-dir + - hostPath: + path: /sys/class/ + type: Directory + name: sys-class + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: 
/etc/pki/ca-trust/extracted + {{- end }} +{{- end -}} diff --git a/charts/v1.28.4/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml b/charts/v1.28.4/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml new file mode 100644 index 000000000..431a01b15 --- /dev/null +++ b/charts/v1.28.4/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml @@ -0,0 +1,94 @@ +{{- if .Values.snapshot.enabled -}} +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.snapshot.snapshotController.name }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.snapshot.snapshotController.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.snapshot.snapshotController.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.snapshot.snapshotController.replicas }} + selector: + matchLabels: + app: {{ .Values.snapshot.snapshotController.name }} + # the snapshot controller won't be marked as ready if the v1 CRDs are unavailable + # in #504 the snapshot-controller will exit after around 7.5 seconds if it + # can't find the v1 CRDs so this value should be greater than that + minReadySeconds: 15 + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.snapshot.snapshotController.name }} +{{- with .Values.snapshot.snapshotController.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.snapshot.snapshotController.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.snapshotController }} + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . | indent 6 }} + containers: + - name: {{ .Values.snapshot.snapshotController.name }} +{{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotController.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.tag }}" +{{- else }} + image: "{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.tag }}" +{{- end }} + args: + - "--v=2" + - "--leader-election=true" + - "--leader-election-namespace={{ .Release.Namespace }}" + resources: {{- toYaml .Values.snapshot.snapshotController.resources | nindent 12 }} + imagePullPolicy: {{ .Values.snapshot.image.csiSnapshotController.pullPolicy }} + +--- +{{- if .Values.snapshot.VolumeSnapshotClass.enabled -}} +kind: VolumeSnapshotClass +apiVersion: snapshot.storage.k8s.io/v1 +metadata: + name: {{ .Values.snapshot.VolumeSnapshotClass.name }} + annotations: + # This is what defines this resource as a hook. Without this line, the + # job is considered part of the release. 
+ "helm.sh/hook": post-install +driver: {{ .Values.driver.name }} +deletionPolicy: {{ .Values.snapshot.VolumeSnapshotClass.deletionPolicy }} +parameters: + incremental: {{ .Values.snapshot.VolumeSnapshotClass.parameters.incremental }} + {{- if ne .Values.snapshot.VolumeSnapshotClass.parameters.resourceGroup "" }} + resourceGroup: {{ .Values.snapshot.VolumeSnapshotClass.parameters.resourceGroup }} + {{- end }} + tags: {{ .Values.snapshot.VolumeSnapshotClass.parameters.tags }} +{{- with .Values.snapshot.VolumeSnapshotClass.additionalLabels }} +additionalLabels: +{{ toYaml . | indent 2 }} +{{- end }} +{{- end -}} +{{- end -}} diff --git a/charts/v1.28.4/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml b/charts/v1.28.4/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..b78940fc6 --- /dev/null +++ b/charts/v1.28.4/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml @@ -0,0 +1,199 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-provisioner-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-provisioner-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-attacher-role +{{ include "azuredisk.labels" . 
| indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-attacher-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-snapshotter-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-snapshotter-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-resizer-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-resizer-role +{{ include "azuredisk.labels" . 
| indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-controller-secret-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.28.4/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml b/charts/v1.28.4/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml new file mode 100644 index 000000000..ae7e080af --- /dev/null +++ b/charts/v1.28.4/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml @@ -0,0 +1,28 @@ +{{- if .Values.rbac.create -}} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-secret-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-node-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.28.4/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml b/charts/v1.28.4/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml new file mode 100644 index 000000000..8e4278b6f --- /dev/null +++ b/charts/v1.28.4/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml @@ -0,0 +1,72 @@ +{{- if and .Values.snapshot.enabled .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-binding +subjects: + - kind: 
ServiceAccount + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-role +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-leaderelection-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.28.4/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml b/charts/v1.28.4/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..1806e9c58 --- /dev/null +++ b/charts/v1.28.4/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v1.28.4/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml b/charts/v1.28.4/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml new file mode 100644 index 000000000..dcc2ff60b --- /dev/null +++ b/charts/v1.28.4/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . 
| indent 2 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v1.28.4/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml b/charts/v1.28.4/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml new file mode 100644 index 000000000..7cdaad0b4 --- /dev/null +++ b/charts/v1.28.4/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml @@ -0,0 +1,7 @@ +{{- if and .Values.snapshot.enabled .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/v1.28.4/azuredisk-csi-driver/values.yaml b/charts/v1.28.4/azuredisk-csi-driver/values.yaml new file mode 100644 index 000000000..bbbded0d3 --- /dev/null +++ b/charts/v1.28.4/azuredisk-csi-driver/values.yaml @@ -0,0 +1,275 @@ +image: + baseRepo: mcr.microsoft.com + azuredisk: + repository: /oss/kubernetes-csi/azuredisk-csi + tag: v1.28.4 + pullPolicy: IfNotPresent + csiProvisioner: + repository: /oss/kubernetes-csi/csi-provisioner + tag: v3.5.0 + pullPolicy: IfNotPresent + csiAttacher: + repository: /oss/kubernetes-csi/csi-attacher + tag: v4.3.0 + pullPolicy: IfNotPresent + csiResizer: + repository: /oss/kubernetes-csi/csi-resizer + tag: v1.8.0 + pullPolicy: IfNotPresent + livenessProbe: + repository: /oss/kubernetes-csi/livenessprobe + tag: v2.10.0 + pullPolicy: IfNotPresent + nodeDriverRegistrar: + repository: /oss/kubernetes-csi/csi-node-driver-registrar + tag: v2.8.0 + pullPolicy: IfNotPresent + +serviceAccount: + create: true # When true, service accounts will be created for you. Set to false if you want to use your own. 
+ controller: csi-azuredisk-controller-sa # Name of Service Account to be created or used + node: csi-azuredisk-node-sa # Name of Service Account to be created or used + snapshotController: csi-snapshot-controller-sa # Name of Service Account to be created or used + +rbac: + create: true + name: azuredisk + +controller: + name: csi-azuredisk-controller + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + allowEmptyCloudConfig: false + enableTrafficManager: false + trafficManagerPort: 7788 + replicas: 2 + metricsPort: 29604 + livenessProbe: + healthPort: 29602 + runOnMaster: false + runOnControlPlane: false + disableAvailabilitySetNodes: false + vmType: "" + provisionerWorkerThreads: 100 + attacherWorkerThreads: 1000 + vmssCacheTTLInSeconds: -1 + logLevel: 5 + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + hostNetwork: true # this setting could be disabled if controller does not depend on MSI setting + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + resources: + csiProvisioner: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiAttacher: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiResizer: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiSnapshotter: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + azuredisk: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + +node: + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + supportZone: true + allowEmptyCloudConfig: true + getNodeIDFromIMDS: false + maxUnavailable: 1 + logLevel: 5 + livenessProbe: + healthPort: 29603 + +snapshot: + enabled: false + name: csi-snapshot-controller + image: + csiSnapshotter: + repository: /oss/kubernetes-csi/csi-snapshotter + tag: v6.2.2 + pullPolicy: IfNotPresent + csiSnapshotController: + repository: /oss/kubernetes-csi/snapshot-controller + tag: v6.2.2 + pullPolicy: IfNotPresent + snapshotController: + name: csi-snapshot-controller + replicas: 2 + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + resources: + limits: + memory: 300Mi + requests: + cpu: 10m + memory: 20Mi + VolumeSnapshotClass: + enabled: false + name: csi-azuredisk-vsc + deletionPolicy: Delete + parameters: + incremental: '"true"' # available values: "true", "false" ("true" by default for Azure Public Cloud, and "false" by default for Azure Stack Cloud) + resourceGroup: "" # available values: EXISTING RESOURCE GROUP (If not specified, snapshot will be stored in the same resource group as source Azure disk) + tags: "" # tag format: 'key1=val1,key2=val2' + additionalLabels: {} + +feature: + enableFSGroupPolicy: true + +driver: + name: disk.csi.azure.com + # maximum number of attachable volumes per node, + # maximum number is defined according to node instance type by default(-1) + volumeAttachLimit: -1 + customUserAgent: "" + userAgentSuffix: "OSS-helm" + azureGoSDKLogLevel: "" # available values: ""(no logs), DEBUG, INFO, WARNING, ERROR + httpsProxy: "" + httpProxy: "" + noProxy: "" + +linux: + enabled: true + dsName: csi-azuredisk-node # daemonset name + kubelet: /var/lib/kubelet + distro: debian # 
available values: debian, fedora + enablePerfOptimization: true + enableRegistrationProbe: true + tolerations: + - operator: "Exists" + hostNetwork: true # this setting could be disabled if perfProfile is `none` + getNodeInfoFromLabels: false # get node info from node labels instead of IMDS + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + resources: + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + nodeDriverRegistrar: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + azuredisk: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + +windows: + enabled: true + useHostProcessContainers: false + dsName: csi-azuredisk-node-win # daemonset name + kubelet: 'C:\var\lib\kubelet' + getNodeInfoFromLabels: false # get node info from node labels instead of IMDS + enableRegistrationProbe: true + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + resources: + livenessProbe: + limits: + memory: 150Mi + requests: + cpu: 10m + memory: 40Mi + nodeDriverRegistrar: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + azuredisk: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi + +cloud: AzurePublicCloud + +## Reference to one or more secrets to be used when pulling images +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +## +imagePullSecrets: [] +# - name: "image-pull-secret" + +workloadIdentity: + clientID: "" + # [optional] If the AAD application or user-assigned managed identity is not in the same tenant as the cluster + # then set tenantID with the application or user-assigned managed identity tenant ID + tenantID: "" diff --git a/charts/v1.28.5/azuredisk-csi-driver-v1.28.5.tgz b/charts/v1.28.5/azuredisk-csi-driver-v1.28.5.tgz new file mode 100644 index 000000000..8b0f6db90 Binary files /dev/null and b/charts/v1.28.5/azuredisk-csi-driver-v1.28.5.tgz differ diff --git a/charts/v1.28.5/azuredisk-csi-driver/Chart.yaml b/charts/v1.28.5/azuredisk-csi-driver/Chart.yaml new file mode 100644 index 000000000..d53dc5867 --- /dev/null +++ b/charts/v1.28.5/azuredisk-csi-driver/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: v1.28.5 +description: Azure disk Container Storage Interface (CSI) Storage Plugin +name: azuredisk-csi-driver +version: v1.28.5 diff --git a/charts/v1.28.5/azuredisk-csi-driver/templates/NOTES.txt b/charts/v1.28.5/azuredisk-csi-driver/templates/NOTES.txt new file mode 100644 index 000000000..6a9695dad --- /dev/null +++ b/charts/v1.28.5/azuredisk-csi-driver/templates/NOTES.txt @@ -0,0 +1,5 @@ +The Azure Disk CSI Driver is getting deployed to your cluster. 
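As a rough illustration of how the chart defaults shown in values.yaml are customized at install time, a small override file might look like the sketch below. Every value in it is a hypothetical example rather than something shipped with the chart; only keys that actually appear in values.yaml (controller.replicas, snapshot.enabled, snapshot.VolumeSnapshotClass, workloadIdentity.clientID, linux.kubelet) are used.

```yaml
# custom-values.yaml -- hypothetical override file, not part of the chart
controller:
  replicas: 1                 # scale the controller down for a small test cluster
snapshot:
  enabled: true               # also deploy the snapshot controller and CRDs
  VolumeSnapshotClass:
    enabled: true
    deletionPolicy: Delete
workloadIdentity:
  clientID: "00000000-0000-0000-0000-000000000000"  # placeholder client ID
linux:
  kubelet: /var/lib/kubelet   # adjust if kubelet uses a non-default root directory
```

A file like this is passed to `helm install` or `helm upgrade` with `-f custom-values.yaml`; any key left out falls back to the defaults listed in values.yaml.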
+ +To check Azure Disk CSI Driver pods status, please run: + + kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch diff --git a/charts/v1.28.5/azuredisk-csi-driver/templates/_helpers.tpl b/charts/v1.28.5/azuredisk-csi-driver/templates/_helpers.tpl new file mode 100644 index 000000000..51fcc8a09 --- /dev/null +++ b/charts/v1.28.5/azuredisk-csi-driver/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* Expand the name of the chart.*/}} +{{- define "azuredisk.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* labels for helm resources */}} +{{- define "azuredisk.labels" -}} +labels: + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" + app.kubernetes.io/name: "{{ template "azuredisk.name" . }}" + app.kubernetes.io/version: "{{ .Chart.AppVersion }}" + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" +{{- end -}} + +{{/* pull secrets for containers */}} +{{- define "azuredisk.pullSecrets" -}} +{{- if .Values.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- end }} +{{- end -}} \ No newline at end of file diff --git a/charts/v1.28.5/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml b/charts/v1.28.5/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml new file mode 100644 index 000000000..76df8af7e --- /dev/null +++ b/charts/v1.28.5/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml @@ -0,0 +1,840 @@ +{{- if .Values.snapshot.enabled -}} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + shortNames: + - vs + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of + the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing + VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from + this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot + object intends to bind to. Please note that verification of binding actually + requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure + both are pointing at each other. Binding MUST be verified prior to usage of + this object. 
+ jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying + storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object representing the volume from which + a snapshot should be created. This PVC is assumed to be in the + same namespace as the VolumeSnapshot object. This field should + be set if the snapshot does not exists, and needs to be created. + This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a + pre-existing VolumeSnapshotContent object representing an existing + volume snapshot. This field should be set if the snapshot already + exists and only needs a representation in Kubernetes. This field + is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. VolumeSnapshotClassName may be + left nil to indicate that the default SnapshotClass should be used. + A given cluster may have multiple default Volume SnapshotClasses: + one default per CSI Driver. If a VolumeSnapshot does not specify + a SnapshotClass, VolumeSnapshotSource will be checked to figure + out what the associated CSI Driver is, and the default VolumeSnapshotClass + associated with that CSI Driver will be used. If more than one VolumeSnapshotClass + exist for a given CSI Driver and more than one have been marked + as default, CreateSnapshot will fail and generate an event. Empty + string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. 
+ Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent + objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent + object to which this VolumeSnapshot object intends to bind to. If + not specified, it indicates that the VolumeSnapshot object has not + been successfully bound to a VolumeSnapshotContent object yet. NOTE: + To avoid possible security issues, consumers must verify binding + between VolumeSnapshot and VolumeSnapshotContent objects is successful + (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the snapshot controller + with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it may indicate + that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. The snapshot controller will keep retrying when an error + occurs during the snapshot creation. Upon success, this error field + will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the snapshot controller with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required + to create a volume from this snapshot. In dynamic snapshot creation + case, this field will be filled in by the snapshot controller with + the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. + For a pre-existing snapshot, this field will be filled with the + "size_bytes" value returned from the CSI "ListSnapshots" gRPC call + if the driver supports it. When restoring a volume from this snapshot, + the size of the volume MUST NOT be smaller than the restoreSize + if it is specified, otherwise the restoration will fail. If not + specified, it indicates that the size is unknown. 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. 
+ properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during the snapshot creation. Upon success, this error field will be cleared. 
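+              # Editor's sketch (illustrative only, message text is hypothetical): while
+              # creation is failing, the snapshot controller keeps retrying and surfaces the
+              # last observed error in the two fields defined just below, roughly as:
+              #   status:
+              #     readyToUse: false
+              #     error:
+              #       time: "2024-01-01T00:00:00Z"
+              #       message: "failed to take snapshot of the volume: ..."   # placeholder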
+ properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + shortNames: + - vsclass + - vsclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the + VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent + created through the VolumeSnapshotClass should be deleted when its bound + VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". + "Retain" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. + Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. 
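+            # Editor's sketch (illustrative, not part of the upstream CRD schema): a minimal
+            # VolumeSnapshotClass pairs the two required fields defined in this schema; the
+            # driver name must match the deployed CSI driver (whatever .Values.driver.name
+            # resolves to in this chart, e.g. disk.csi.azure.com for the upstream driver):
+            #   apiVersion: snapshot.storage.k8s.io/v1
+            #   kind: VolumeSnapshotClass
+            #   metadata:
+            #     name: example-snapclass    # placeholder
+            #   driver: disk.csi.azure.com   # assumption; use the chart's configured driver name
+            #   deletionPolicy: Delete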
+ type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: false + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + shortNames: + - vsc + - vscs + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical + snapshot on the underlying storage system should be deleted when its bound + VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on + the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. + "Delete" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are deleted. For dynamically provisioned + snapshots, this field will automatically be filled in by the CSI + snapshotter sidecar with the "DeletionPolicy" field defined in the + corresponding VolumeSnapshotClass. For pre-existing snapshots, users + MUST specify this field when creating the VolumeSnapshotContent + object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be + the same as the name returned by the CSI GetPluginName() call for + that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) + dynamically provisioned or already exists, and just requires a Kubernetes + object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of + a pre-existing snapshot on the underlying storage system for + which a Kubernetes object representation was (or should be) + created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the + volume from which a snapshot should be dynamically taken from. + This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + sourceVolumeMode: + description: SourceVolumeMode is the mode of the volume whose snapshot + is taken. Can be either “Filesystem” or “Block”. If not specified, + it indicates the source volume's mode is unknown. This field is + immutable. This field is an alpha field. + type: string + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot + was (or will be) created. Note that after provisioning, the VolumeSnapshotClass + may be deleted or recreated with different set of values, and as + such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object + to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be + provided for binding to happen. This field is immutable after creation. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the CSI snapshotter + sidecar with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it indicates + the creation time is unknown. The format of this field is a Unix + nanoseconds time encoded as an int64. On Unix, the command `date + +%s%N` returns the current time in nanoseconds since 1970-01-01 + 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, + if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the CSI snapshotter sidecar with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. 
In dynamic snapshot creation case, this field will be + filled in by the CSI snapshotter sidecar with the "size_bytes" value + returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "size_bytes" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. When restoring a volume from this snapshot, the size of the + volume MUST NOT be smaller than the restoreSize if it is specified, + otherwise the restoration will fail. If not specified, it indicates + that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot + on the underlying storage system. If not specified, it indicates + that dynamic snapshot creation has either failed or it is still + in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. 
TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. 
When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +{{- end -}} diff --git a/charts/v1.28.5/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml b/charts/v1.28.5/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml new file mode 100644 index 000000000..517b8233f --- /dev/null +++ b/charts/v1.28.5/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml @@ -0,0 +1,281 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.controller.name }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.controller.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.controller.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.controller.replicas }} + selector: + matchLabels: + app: {{ .Values.controller.name }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.controller.name }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} +{{- with .Values.controller.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + serviceAccountName: {{ .Values.serviceAccount.controller }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.controller.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + {{- if .Values.controller.runOnMaster}} + node-role.kubernetes.io/master: "" + {{- end}} + {{- if .Values.controller.runOnControlPlane}} + node-role.kubernetes.io/control-plane: "" + {{- end}} + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: csi-provisioner +{{- if hasPrefix "/" .Values.image.csiProvisioner.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- else }} + image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- end }} + args: + - "--feature-gates=Topology=true" + - "--csi-address=$(ADDRESS)" + - "--v=2" + - "--timeout=30s" + - "--leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "--worker-threads={{ .Values.controller.provisionerWorkerThreads }}" + - "--extra-create-metadata=true" + - "--strict-topology=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} + - name: csi-attacher +{{- if hasPrefix "/" .Values.image.csiAttacher.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}" +{{- else }} + image: "{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}" +{{- end }} + args: + - "-v=2" + - "-csi-address=$(ADDRESS)" + - "-timeout=1200s" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "-worker-threads={{ .Values.controller.attacherWorkerThreads }}" + - "-kube-api-qps=200" + - "-kube-api-burst=400" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiAttacher | nindent 12 }} + - name: csi-snapshotter +{{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotter.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}" +{{- else }} + image: "{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "-v=2" + - "--timeout=1200s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiSnapshotter | nindent 12 }} + - name: csi-resizer +{{- if hasPrefix "/" .Values.image.csiResizer.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- else }} + image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - '-handle-volume-inuse-error=false' + - '-feature-gates=RecoverVolumeExpansionFailure=true' + - "-timeout=240s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} + - name: liveness-probe +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - 
--probe-timeout=3s + - --health-port={{ .Values.controller.livenessProbe.healthPort }} + - --v=2 + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }} +{{- if eq .Values.controller.enableTrafficManager true }} + - image: mcr.microsoft.com/aks/ccp/ccp-auto-thrust:master.221118.2 + imagePullPolicy: IfNotPresent + name: proxy + command: + - /ccp-auto-thrust + args: + - "--port={{ .Values.controller.trafficManagerPort }}" + ports: + - containerPort: {{ .Values.controller.trafficManagerPort }} + protocol: TCP +{{- end }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.controller.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}" + - "--disable-avset-nodes={{ .Values.controller.disableAvailabilitySetNodes }}" + - "--vm-type={{ .Values.controller.vmType }}" + - "--drivername={{ .Values.driver.name }}" + - "--cloud-config-secret-name={{ .Values.controller.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.controller.allowEmptyCloudConfig }}" + - "--vmss-cache-ttl-seconds={{ .Values.controller.vmssCacheTTLInSeconds }}" + - "--enable-traffic-manager={{ .Values.controller.enableTrafficManager }}" + - "--traffic-manager-port={{ .Values.controller.trafficManagerPort }}" + ports: + - containerPort: {{ .Values.controller.livenessProbe.healthPort }} + name: healthz + protocol: TCP + - containerPort: {{ .Values.controller.metricsPort }} + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.controller.resources.azuredisk | nindent 12 }} + volumes: + - name: socket-dir + emptyDir: {} + - name: 
azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} diff --git a/charts/v1.28.5/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml b/charts/v1.28.5/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml new file mode 100644 index 000000000..21f941748 --- /dev/null +++ b/charts/v1.28.5/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml @@ -0,0 +1,13 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: {{ .Values.driver.name }} + annotations: + csiDriver: "{{ .Values.image.azuredisk.tag }}" + snapshot: "{{ .Values.snapshot.image.csiSnapshotter.tag }}" +spec: + attachRequired: true + podInfoOnMount: false + {{- if .Values.feature.enableFSGroupPolicy}} + fsGroupPolicy: File + {{- end}} diff --git a/charts/v1.28.5/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml b/charts/v1.28.5/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml new file mode 100644 index 000000000..24de36157 --- /dev/null +++ b/charts/v1.28.5/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml @@ -0,0 +1,154 @@ +{{- if and (.Values.windows.enabled) (eq .Values.windows.useHostProcessContainers true) }} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.windows.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.windows.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.windows.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.windows.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.windows.dsName }} +{{- with .Values.windows.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.windows.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.node }} +{{- with .Values.windows.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + nodeSelector: + kubernetes.io/os: windows +{{- with .Values.windows.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.windows.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.windows.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + securityContext: + seccompProfile: + type: RuntimeDefault + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + initContainers: + - name: init +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + command: + - "powershell.exe" + - "-c" + - "New-Item -ItemType Directory -Path C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\ -Force" + containers: + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + command: + - "csi-node-driver-registrar.exe" + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--plugin-registration-path=$(PLUGIN_REG_DIR)" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://{{ .Values.windows.kubelet }}\plugins\{{ .Values.driver.name }}\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + - name: PLUGIN_REG_DIR + value: C:\\var\\lib\\kubelet\\plugins_registry\\ + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }} + resources: {{- toYaml .Values.windows.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- end }} + command: + - "azurediskplugin.exe" + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--enable-windows-host-process=true" + - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: 
KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: C:\k\azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} +{{- end -}} diff --git a/charts/v1.28.5/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml b/charts/v1.28.5/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml new file mode 100644 index 000000000..260a68c95 --- /dev/null +++ b/charts/v1.28.5/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml @@ -0,0 +1,238 @@ +{{- if and (.Values.windows.enabled) (ne .Values.windows.useHostProcessContainers true) }} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.windows.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.windows.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.windows.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.windows.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.windows.dsName }} +{{- with .Values.windows.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.windows.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.node }} +{{- with .Values.windows.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + nodeSelector: + kubernetes.io/os: windows +{{- with .Values.windows.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.windows.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.windows.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: liveness-probe + volumeMounts: + - mountPath: C:\csi + name: plugin-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port={{ .Values.node.livenessProbe.healthPort }}" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} + resources: {{- toYaml .Values.windows.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" +{{- if .Values.windows.enableRegistrationProbe }} + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 60 + timeoutSeconds: 30 +{{- end }} + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }} + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: registration-dir + mountPath: C:\registration + resources: {{- toYaml .Values.windows.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--get-node-info-from-labels={{ .Values.windows.getNodeInfoFromLabels }}" + - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + 
optional: true + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: C:\k\azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: azure-config + mountPath: C:\k + - name: csi-proxy-fs-pipe-v1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + mountPath: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + mountPath: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-volume-v1beta2 + resources: {{- toYaml .Values.windows.resources.azuredisk | nindent 12 }} + volumes: + - name: csi-proxy-fs-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1beta2 + - name: registration-dir + hostPath: + path: {{ .Values.windows.kubelet }}\plugins_registry\ + type: Directory + - name: kubelet-dir + hostPath: + path: {{ .Values.windows.kubelet }}\ + type: Directory + - name: plugin-dir + hostPath: + path: {{ .Values.windows.kubelet }}\plugins\{{ .Values.driver.name }}\ + type: DirectoryOrCreate + - name: azure-config + hostPath: + path: C:\k + type: DirectoryOrCreate +{{- end -}} diff --git a/charts/v1.28.5/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml b/charts/v1.28.5/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml new file mode 100644 index 000000000..86b829361 --- /dev/null +++ b/charts/v1.28.5/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml @@ -0,0 +1,250 @@ +{{- if .Values.linux.enabled}} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.linux.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.linux.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.linux.annotations }} + annotations: +{{ . 
| toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.linux.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.linux.dsName }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} +{{- with .Values.linux.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.linux.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.linux.hostNetwork }} + dnsPolicy: Default + serviceAccountName: {{ .Values.serviceAccount.node }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.linux.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.linux.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.linux.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.linux.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . | indent 6 }} + containers: + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ .Values.node.livenessProbe.healthPort }} + - --v=2 + resources: {{- toYaml .Values.linux.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 +{{- if .Values.linux.enableRegistrationProbe }} + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 +{{- end }} + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: {{- toYaml .Values.linux.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--enable-perf-optimization={{ 
.Values.linux.enablePerfOptimization }}" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--get-node-info-from-labels={{ .Values.linux.getNodeInfoFromLabels }}" + - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: {{ .Values.linux.kubelet }}/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /dev + name: device-dir + - mountPath: /sys/bus/scsi/devices + name: sys-devices-dir + - mountPath: /sys/class/ + name: sys-class + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.linux.resources.azuredisk | nindent 12 }} + volumes: + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }} + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: /dev + type: Directory + name: device-dir + - hostPath: + path: /sys/bus/scsi/devices + type: Directory + name: sys-devices-dir + - hostPath: + path: /sys/class/ + type: Directory + name: sys-class + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: 
/etc/pki/ca-trust/extracted + {{- end }} +{{- end -}} diff --git a/charts/v1.28.5/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml b/charts/v1.28.5/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml new file mode 100644 index 000000000..431a01b15 --- /dev/null +++ b/charts/v1.28.5/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml @@ -0,0 +1,94 @@ +{{- if .Values.snapshot.enabled -}} +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.snapshot.snapshotController.name }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.snapshot.snapshotController.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.snapshot.snapshotController.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.snapshot.snapshotController.replicas }} + selector: + matchLabels: + app: {{ .Values.snapshot.snapshotController.name }} + # the snapshot controller won't be marked as ready if the v1 CRDs are unavailable + # in #504 the snapshot-controller will exit after around 7.5 seconds if it + # can't find the v1 CRDs so this value should be greater than that + minReadySeconds: 15 + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.snapshot.snapshotController.name }} +{{- with .Values.snapshot.snapshotController.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.snapshot.snapshotController.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.snapshotController }} + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . | indent 6 }} + containers: + - name: {{ .Values.snapshot.snapshotController.name }} +{{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotController.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.tag }}" +{{- else }} + image: "{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.tag }}" +{{- end }} + args: + - "--v=2" + - "--leader-election=true" + - "--leader-election-namespace={{ .Release.Namespace }}" + resources: {{- toYaml .Values.snapshot.snapshotController.resources | nindent 12 }} + imagePullPolicy: {{ .Values.snapshot.image.csiSnapshotController.pullPolicy }} + +--- +{{- if .Values.snapshot.VolumeSnapshotClass.enabled -}} +kind: VolumeSnapshotClass +apiVersion: snapshot.storage.k8s.io/v1 +metadata: + name: {{ .Values.snapshot.VolumeSnapshotClass.name }} + annotations: + # This is what defines this resource as a hook. Without this line, the + # job is considered part of the release. 
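    # For reference only (an illustrative sketch, not rendered by this chart): a VolumeSnapshot
    # consuming the class created by this template might look like the following, assuming the
    # default class name "csi-azuredisk-vsc" from values.yaml and a hypothetical existing PVC
    # named "pvc-azuredisk":
    #
    #   apiVersion: snapshot.storage.k8s.io/v1
    #   kind: VolumeSnapshot
    #   metadata:
    #     name: azuredisk-volume-snapshot
    #   spec:
    #     volumeSnapshotClassName: csi-azuredisk-vsc
    #     source:
    #       persistentVolumeClaimName: pvc-azuredisk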
+ "helm.sh/hook": post-install +driver: {{ .Values.driver.name }} +deletionPolicy: {{ .Values.snapshot.VolumeSnapshotClass.deletionPolicy }} +parameters: + incremental: {{ .Values.snapshot.VolumeSnapshotClass.parameters.incremental }} + {{- if ne .Values.snapshot.VolumeSnapshotClass.parameters.resourceGroup "" }} + resourceGroup: {{ .Values.snapshot.VolumeSnapshotClass.parameters.resourceGroup }} + {{- end }} + tags: {{ .Values.snapshot.VolumeSnapshotClass.parameters.tags }} +{{- with .Values.snapshot.VolumeSnapshotClass.additionalLabels }} +additionalLabels: +{{ toYaml . | indent 2 }} +{{- end }} +{{- end -}} +{{- end -}} diff --git a/charts/v1.28.5/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml b/charts/v1.28.5/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..b78940fc6 --- /dev/null +++ b/charts/v1.28.5/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml @@ -0,0 +1,199 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-provisioner-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-provisioner-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-attacher-role +{{ include "azuredisk.labels" . 
| indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-attacher-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-snapshotter-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-snapshotter-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-resizer-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-resizer-role +{{ include "azuredisk.labels" . 
| indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-controller-secret-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.28.5/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml b/charts/v1.28.5/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml new file mode 100644 index 000000000..ae7e080af --- /dev/null +++ b/charts/v1.28.5/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml @@ -0,0 +1,28 @@ +{{- if .Values.rbac.create -}} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-secret-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-node-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.28.5/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml b/charts/v1.28.5/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml new file mode 100644 index 000000000..8e4278b6f --- /dev/null +++ b/charts/v1.28.5/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml @@ -0,0 +1,72 @@ +{{- if and .Values.snapshot.enabled .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-binding +subjects: + - kind: 
ServiceAccount + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-role +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-leaderelection-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.28.5/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml b/charts/v1.28.5/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..1806e9c58 --- /dev/null +++ b/charts/v1.28.5/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v1.28.5/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml b/charts/v1.28.5/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml new file mode 100644 index 000000000..dcc2ff60b --- /dev/null +++ b/charts/v1.28.5/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . 
| indent 2 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v1.28.5/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml b/charts/v1.28.5/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml new file mode 100644 index 000000000..7cdaad0b4 --- /dev/null +++ b/charts/v1.28.5/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml @@ -0,0 +1,7 @@ +{{- if and .Values.snapshot.enabled .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/v1.28.5/azuredisk-csi-driver/values.yaml b/charts/v1.28.5/azuredisk-csi-driver/values.yaml new file mode 100644 index 000000000..a47339eb0 --- /dev/null +++ b/charts/v1.28.5/azuredisk-csi-driver/values.yaml @@ -0,0 +1,275 @@ +image: + baseRepo: mcr.microsoft.com + azuredisk: + repository: /oss/kubernetes-csi/azuredisk-csi + tag: v1.28.5 + pullPolicy: IfNotPresent + csiProvisioner: + repository: /oss/kubernetes-csi/csi-provisioner + tag: v3.5.0 + pullPolicy: IfNotPresent + csiAttacher: + repository: /oss/kubernetes-csi/csi-attacher + tag: v4.3.0 + pullPolicy: IfNotPresent + csiResizer: + repository: /oss/kubernetes-csi/csi-resizer + tag: v1.8.0 + pullPolicy: IfNotPresent + livenessProbe: + repository: /oss/kubernetes-csi/livenessprobe + tag: v2.10.0 + pullPolicy: IfNotPresent + nodeDriverRegistrar: + repository: /oss/kubernetes-csi/csi-node-driver-registrar + tag: v2.8.0 + pullPolicy: IfNotPresent + +serviceAccount: + create: true # When true, service accounts will be created for you. Set to false if you want to use your own. 
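  # A minimal usage sketch (assumptions: the chart is installed from this repository checkout and
  # the pre-created accounts "my-controller-sa" / "my-node-sa" are hypothetical names); to reuse
  # existing service accounts instead of having the chart create them, the names below can be
  # overridden at install time, e.g.:
  #
  #   helm install azuredisk-csi-driver ./charts/v1.28.5/azuredisk-csi-driver \
  #     --namespace kube-system \
  #     --set serviceAccount.create=false \
  #     --set serviceAccount.controller=my-controller-sa \
  #     --set serviceAccount.node=my-node-sa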
+ controller: csi-azuredisk-controller-sa # Name of Service Account to be created or used + node: csi-azuredisk-node-sa # Name of Service Account to be created or used + snapshotController: csi-snapshot-controller-sa # Name of Service Account to be created or used + +rbac: + create: true + name: azuredisk + +controller: + name: csi-azuredisk-controller + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + allowEmptyCloudConfig: false + enableTrafficManager: false + trafficManagerPort: 7788 + replicas: 2 + metricsPort: 29604 + livenessProbe: + healthPort: 29602 + runOnMaster: false + runOnControlPlane: false + disableAvailabilitySetNodes: false + vmType: "" + provisionerWorkerThreads: 100 + attacherWorkerThreads: 1000 + vmssCacheTTLInSeconds: -1 + logLevel: 5 + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + hostNetwork: true # this setting could be disabled if controller does not depend on MSI setting + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + resources: + csiProvisioner: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiAttacher: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiResizer: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiSnapshotter: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + azuredisk: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + +node: + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + supportZone: true + allowEmptyCloudConfig: true + getNodeIDFromIMDS: false + maxUnavailable: 1 + logLevel: 5 + livenessProbe: + healthPort: 29603 + +snapshot: + enabled: false + name: csi-snapshot-controller + image: + csiSnapshotter: + repository: /oss/kubernetes-csi/csi-snapshotter + tag: v6.2.2 + pullPolicy: IfNotPresent + csiSnapshotController: + repository: /oss/kubernetes-csi/snapshot-controller + tag: v6.2.2 + pullPolicy: IfNotPresent + snapshotController: + name: csi-snapshot-controller + replicas: 2 + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + resources: + limits: + memory: 300Mi + requests: + cpu: 10m + memory: 20Mi + VolumeSnapshotClass: + enabled: false + name: csi-azuredisk-vsc + deletionPolicy: Delete + parameters: + incremental: '"true"' # available values: "true", "false" ("true" by default for Azure Public Cloud, and "false" by default for Azure Stack Cloud) + resourceGroup: "" # available values: EXISTING RESOURCE GROUP (If not specified, snapshot will be stored in the same resource group as source Azure disk) + tags: "" # tag format: 'key1=val1,key2=val2' + additionalLabels: {} + +feature: + enableFSGroupPolicy: true + +driver: + name: disk.csi.azure.com + # maximum number of attachable volumes per node, + # maximum number is defined according to node instance type by default(-1) + volumeAttachLimit: -1 + customUserAgent: "" + userAgentSuffix: "OSS-helm" + azureGoSDKLogLevel: "" # available values: ""(no logs), DEBUG, INFO, WARNING, ERROR + httpsProxy: "" + httpProxy: "" + noProxy: "" + +linux: + enabled: true + dsName: csi-azuredisk-node # daemonset name + kubelet: /var/lib/kubelet + distro: debian # 
available values: debian, fedora + enablePerfOptimization: true + enableRegistrationProbe: true + tolerations: + - operator: "Exists" + hostNetwork: true # this setting could be disabled if perfProfile is `none` + getNodeInfoFromLabels: false # get node info from node labels instead of IMDS + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + resources: + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + nodeDriverRegistrar: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + azuredisk: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + +windows: + enabled: true + useHostProcessContainers: false + dsName: csi-azuredisk-node-win # daemonset name + kubelet: 'C:\var\lib\kubelet' + getNodeInfoFromLabels: false # get node info from node labels instead of IMDS + enableRegistrationProbe: true + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + resources: + livenessProbe: + limits: + memory: 150Mi + requests: + cpu: 10m + memory: 40Mi + nodeDriverRegistrar: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + azuredisk: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi + +cloud: AzurePublicCloud + +## Reference to one or more secrets to be used when pulling images +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +## +imagePullSecrets: [] +# - name: "image-pull-secret" + +workloadIdentity: + clientID: "" + # [optional] If the AAD application or user-assigned managed identity is not in the same tenant as the cluster + # then set tenantID with the application or user-assigned managed identity tenant ID + tenantID: "" diff --git a/charts/v1.29.1/azuredisk-csi-driver-v1.29.1.tgz b/charts/v1.29.1/azuredisk-csi-driver-v1.29.1.tgz new file mode 100644 index 000000000..13fdc959d Binary files /dev/null and b/charts/v1.29.1/azuredisk-csi-driver-v1.29.1.tgz differ diff --git a/charts/v1.29.1/azuredisk-csi-driver/Chart.yaml b/charts/v1.29.1/azuredisk-csi-driver/Chart.yaml new file mode 100644 index 000000000..e7318516c --- /dev/null +++ b/charts/v1.29.1/azuredisk-csi-driver/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: v1.29.1 +description: Azure disk Container Storage Interface (CSI) Storage Plugin +name: azuredisk-csi-driver +version: v1.29.1 diff --git a/charts/v1.29.1/azuredisk-csi-driver/templates/NOTES.txt b/charts/v1.29.1/azuredisk-csi-driver/templates/NOTES.txt new file mode 100644 index 000000000..6a9695dad --- /dev/null +++ b/charts/v1.29.1/azuredisk-csi-driver/templates/NOTES.txt @@ -0,0 +1,5 @@ +The Azure Disk CSI Driver is getting deployed to your cluster. 
+ +To check Azure Disk CSI Driver pods status, please run: + + kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch diff --git a/charts/v1.29.1/azuredisk-csi-driver/templates/_helpers.tpl b/charts/v1.29.1/azuredisk-csi-driver/templates/_helpers.tpl new file mode 100644 index 000000000..51fcc8a09 --- /dev/null +++ b/charts/v1.29.1/azuredisk-csi-driver/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* Expand the name of the chart.*/}} +{{- define "azuredisk.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* labels for helm resources */}} +{{- define "azuredisk.labels" -}} +labels: + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" + app.kubernetes.io/name: "{{ template "azuredisk.name" . }}" + app.kubernetes.io/version: "{{ .Chart.AppVersion }}" + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" +{{- end -}} + +{{/* pull secrets for containers */}} +{{- define "azuredisk.pullSecrets" -}} +{{- if .Values.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- end }} +{{- end -}} \ No newline at end of file diff --git a/charts/v1.29.1/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml b/charts/v1.29.1/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml new file mode 100644 index 000000000..76df8af7e --- /dev/null +++ b/charts/v1.29.1/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml @@ -0,0 +1,840 @@ +{{- if .Values.snapshot.enabled -}} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + shortNames: + - vs + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of + the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing + VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from + this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot + object intends to bind to. Please note that verification of binding actually + requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure + both are pointing at each other. Binding MUST be verified prior to usage of + this object. 
+ jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying + storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object representing the volume from which + a snapshot should be created. This PVC is assumed to be in the + same namespace as the VolumeSnapshot object. This field should + be set if the snapshot does not exists, and needs to be created. + This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a + pre-existing VolumeSnapshotContent object representing an existing + volume snapshot. This field should be set if the snapshot already + exists and only needs a representation in Kubernetes. This field + is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. VolumeSnapshotClassName may be + left nil to indicate that the default SnapshotClass should be used. + A given cluster may have multiple default Volume SnapshotClasses: + one default per CSI Driver. If a VolumeSnapshot does not specify + a SnapshotClass, VolumeSnapshotSource will be checked to figure + out what the associated CSI Driver is, and the default VolumeSnapshotClass + associated with that CSI Driver will be used. If more than one VolumeSnapshotClass + exist for a given CSI Driver and more than one have been marked + as default, CreateSnapshot will fail and generate an event. Empty + string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. 
+ Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent + objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent + object to which this VolumeSnapshot object intends to bind to. If + not specified, it indicates that the VolumeSnapshot object has not + been successfully bound to a VolumeSnapshotContent object yet. NOTE: + To avoid possible security issues, consumers must verify binding + between VolumeSnapshot and VolumeSnapshotContent objects is successful + (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the snapshot controller + with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it may indicate + that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. The snapshot controller will keep retrying when an error + occurs during the snapshot creation. Upon success, this error field + will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the snapshot controller with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required + to create a volume from this snapshot. In dynamic snapshot creation + case, this field will be filled in by the snapshot controller with + the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. + For a pre-existing snapshot, this field will be filled with the + "size_bytes" value returned from the CSI "ListSnapshots" gRPC call + if the driver supports it. When restoring a volume from this snapshot, + the size of the volume MUST NOT be smaller than the restoreSize + if it is specified, otherwise the restoration will fail. If not + specified, it indicates that the size is unknown. 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. 
+ properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during the snapshot creation. Upon success, this error field will be cleared. 
+ properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + shortNames: + - vsclass + - vsclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the + VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent + created through the VolumeSnapshotClass should be deleted when its bound + VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". + "Retain" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. + Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. 
+ type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: false + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + shortNames: + - vsc + - vscs + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical + snapshot on the underlying storage system should be deleted when its bound + VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on + the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. + "Delete" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are deleted. For dynamically provisioned + snapshots, this field will automatically be filled in by the CSI + snapshotter sidecar with the "DeletionPolicy" field defined in the + corresponding VolumeSnapshotClass. For pre-existing snapshots, users + MUST specify this field when creating the VolumeSnapshotContent + object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be + the same as the name returned by the CSI GetPluginName() call for + that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) + dynamically provisioned or already exists, and just requires a Kubernetes + object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of + a pre-existing snapshot on the underlying storage system for + which a Kubernetes object representation was (or should be) + created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the + volume from which a snapshot should be dynamically taken from. + This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + sourceVolumeMode: + description: SourceVolumeMode is the mode of the volume whose snapshot + is taken. Can be either “Filesystem” or “Block”. If not specified, + it indicates the source volume's mode is unknown. This field is + immutable. This field is an alpha field. + type: string + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot + was (or will be) created. Note that after provisioning, the VolumeSnapshotClass + may be deleted or recreated with different set of values, and as + such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object + to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be + provided for binding to happen. This field is immutable after creation. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the CSI snapshotter + sidecar with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it indicates + the creation time is unknown. The format of this field is a Unix + nanoseconds time encoded as an int64. On Unix, the command `date + +%s%N` returns the current time in nanoseconds since 1970-01-01 + 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, + if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the CSI snapshotter sidecar with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. 
In dynamic snapshot creation case, this field will be + filled in by the CSI snapshotter sidecar with the "size_bytes" value + returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "size_bytes" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. When restoring a volume from this snapshot, the size of the + volume MUST NOT be smaller than the restoreSize if it is specified, + otherwise the restoration will fail. If not specified, it indicates + that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot + on the underlying storage system. If not specified, it indicates + that dynamic snapshot creation has either failed or it is still + in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. 
TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. 
When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +{{- end -}} diff --git a/charts/v1.29.1/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml b/charts/v1.29.1/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml new file mode 100644 index 000000000..f23ec8627 --- /dev/null +++ b/charts/v1.29.1/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml @@ -0,0 +1,288 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.controller.name }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.controller.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.controller.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.controller.replicas }} + selector: + matchLabels: + app: {{ .Values.controller.name }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.controller.name }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} +{{- with .Values.controller.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + serviceAccountName: {{ .Values.serviceAccount.controller }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.controller.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + {{- if .Values.controller.runOnMaster}} + node-role.kubernetes.io/master: "" + {{- end}} + {{- if .Values.controller.runOnControlPlane}} + node-role.kubernetes.io/control-plane: "" + {{- end}} + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: csi-provisioner +{{- if hasPrefix "/" .Values.image.csiProvisioner.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- else }} + image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- end }} + args: + - "--feature-gates=Topology=true,HonorPVReclaimPolicy=true" + - "--csi-address=$(ADDRESS)" + - "--v=2" + - "--timeout=30s" + - "--leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "--worker-threads={{ .Values.controller.provisionerWorkerThreads }}" + - "--extra-create-metadata=true" + - "--strict-topology=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} + - name: csi-attacher +{{- if hasPrefix "/" .Values.image.csiAttacher.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}" +{{- else }} + image: "{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}" +{{- end }} + args: + - "-v=2" + - "-csi-address=$(ADDRESS)" + - "-timeout=1200s" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "-worker-threads={{ .Values.controller.attacherWorkerThreads }}" + - "-kube-api-qps=200" + - "-kube-api-burst=400" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiAttacher | nindent 12 }} + - name: csi-snapshotter +{{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotter.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}" +{{- else }} + image: "{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "-v=2" + - "--timeout=1200s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiSnapshotter | nindent 12 }} + - name: csi-resizer +{{- if hasPrefix "/" .Values.image.csiResizer.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- else }} + image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - '-handle-volume-inuse-error=false' + - '-feature-gates=RecoverVolumeExpansionFailure=true' + - "-timeout=240s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} + - name: liveness-probe +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - 
--csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ .Values.controller.livenessProbe.healthPort }} + - --v=2 + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }} +{{- if eq .Values.controller.enableTrafficManager true }} + - image: mcr.microsoft.com/aks/ccp/ccp-auto-thrust:master.221118.2 + imagePullPolicy: IfNotPresent + name: proxy + command: + - /ccp-auto-thrust + args: + - "--port={{ .Values.controller.trafficManagerPort }}" + ports: + - containerPort: {{ .Values.controller.trafficManagerPort }} + protocol: TCP +{{- end }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.controller.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}" + - "--disable-avset-nodes={{ .Values.controller.disableAvailabilitySetNodes }}" + - "--vm-type={{ .Values.controller.vmType }}" + - "--drivername={{ .Values.driver.name }}" + - "--cloud-config-secret-name={{ .Values.controller.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.controller.allowEmptyCloudConfig }}" + - "--vmss-cache-ttl-seconds={{ .Values.controller.vmssCacheTTLInSeconds }}" + - "--enable-traffic-manager={{ .Values.controller.enableTrafficManager }}" + - "--traffic-manager-port={{ .Values.controller.trafficManagerPort }}" + - "--enable-otel-tracing={{ .Values.controller.otelTracing.enabled }}" + ports: + - containerPort: {{ .Values.controller.livenessProbe.healthPort }} + name: healthz + protocol: TCP + - containerPort: {{ .Values.controller.metricsPort }} + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: {{ .Values.azureCredentialFileConfigMap }} + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + {{- if .Values.controller.otelTracing.enabled }} + - name: OTEL_SERVICE_NAME + value: {{ .Values.controller.otelTracing.otelServiceName }} + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: {{ .Values.controller.otelTracing.otelExporterEndpoint }} + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: 
ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.controller.resources.azuredisk | nindent 12 }} + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} diff --git a/charts/v1.29.1/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml b/charts/v1.29.1/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml new file mode 100644 index 000000000..21f941748 --- /dev/null +++ b/charts/v1.29.1/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml @@ -0,0 +1,13 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: {{ .Values.driver.name }} + annotations: + csiDriver: "{{ .Values.image.azuredisk.tag }}" + snapshot: "{{ .Values.snapshot.image.csiSnapshotter.tag }}" +spec: + attachRequired: true + podInfoOnMount: false + {{- if .Values.feature.enableFSGroupPolicy}} + fsGroupPolicy: File + {{- end}} diff --git a/charts/v1.29.1/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml b/charts/v1.29.1/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml new file mode 100644 index 000000000..409231449 --- /dev/null +++ b/charts/v1.29.1/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml @@ -0,0 +1,161 @@ +{{- if and (.Values.windows.enabled) (eq .Values.windows.useHostProcessContainers true) }} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.windows.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.windows.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.windows.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.windows.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.windows.dsName }} +{{- with .Values.windows.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.windows.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.node }} +{{- with .Values.windows.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + nodeSelector: + kubernetes.io/os: windows +{{- with .Values.windows.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.windows.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.windows.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + securityContext: + seccompProfile: + type: RuntimeDefault + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + initContainers: + - name: init +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}.1-windows-hp" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}.1-windows-hp" +{{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + command: + - "powershell.exe" + - "-c" + - "New-Item -ItemType Directory -Path C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\ -Force" + containers: + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + command: + - "csi-node-driver-registrar.exe" + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--plugin-registration-path=$(PLUGIN_REG_DIR)" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://{{ .Values.windows.kubelet }}\plugins\{{ .Values.driver.name }}\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + - name: PLUGIN_REG_DIR + value: C:\\var\\lib\\kubelet\\plugins_registry\\ + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }} + resources: {{- toYaml .Values.windows.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}.1-windows-hp" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}.1-windows-hp" +{{- end }} + command: + - "azurediskplugin.exe" + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--enable-windows-host-process=true" + - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" + - "--enable-otel-tracing={{ .Values.windows.otelTracing.enabled }}" + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: {{ .Values.azureCredentialFileConfigMap }} + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne 
.Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: C:\k\azurestackcloud.json + {{- end }} + {{- if .Values.windows.otelTracing.enabled }} + - name: OTEL_SERVICE_NAME + value: {{ .Values.windows.otelTracing.otelServiceName }} + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: {{ .Values.windows.otelTracing.otelExporterEndpoint }} + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} +{{- end -}} diff --git a/charts/v1.29.1/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml b/charts/v1.29.1/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml new file mode 100644 index 000000000..cb0e2c7cf --- /dev/null +++ b/charts/v1.29.1/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml @@ -0,0 +1,245 @@ +{{- if and (.Values.windows.enabled) (ne .Values.windows.useHostProcessContainers true) }} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.windows.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.windows.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.windows.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.windows.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.windows.dsName }} +{{- with .Values.windows.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.windows.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.node }} +{{- with .Values.windows.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + nodeSelector: + kubernetes.io/os: windows +{{- with .Values.windows.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.windows.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.windows.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: liveness-probe + volumeMounts: + - mountPath: C:\csi + name: plugin-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port={{ .Values.node.livenessProbe.healthPort }}" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} + resources: {{- toYaml .Values.windows.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" +{{- if .Values.windows.enableRegistrationProbe }} + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 60 + timeoutSeconds: 30 +{{- end }} + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }} + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: registration-dir + mountPath: C:\registration + resources: {{- toYaml .Values.windows.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--get-node-info-from-labels={{ .Values.windows.getNodeInfoFromLabels }}" + - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" + - "--enable-otel-tracing={{ .Values.windows.otelTracing.enabled }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + 
valueFrom: + configMapKeyRef: + name: {{ .Values.azureCredentialFileConfigMap }} + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: C:\k\azurestackcloud.json + {{- end }} + {{- if .Values.windows.otelTracing.enabled }} + - name: OTEL_SERVICE_NAME + value: {{ .Values.windows.otelTracing.otelServiceName }} + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: {{ .Values.windows.otelTracing.otelExporterEndpoint }} + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: azure-config + mountPath: C:\k + - name: csi-proxy-fs-pipe-v1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + mountPath: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + mountPath: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-volume-v1beta2 + resources: {{- toYaml .Values.windows.resources.azuredisk | nindent 12 }} + volumes: + - name: csi-proxy-fs-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1beta2 + - name: registration-dir + hostPath: + path: {{ .Values.windows.kubelet }}\plugins_registry\ + type: Directory + - name: kubelet-dir + hostPath: + path: {{ .Values.windows.kubelet }}\ + type: Directory + - name: plugin-dir + hostPath: + path: {{ .Values.windows.kubelet }}\plugins\{{ .Values.driver.name }}\ + type: DirectoryOrCreate + - name: azure-config + hostPath: + path: C:\k + type: DirectoryOrCreate +{{- end -}} diff --git a/charts/v1.29.1/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml b/charts/v1.29.1/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml new file mode 100644 index 000000000..e9cab38ce --- /dev/null +++ b/charts/v1.29.1/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml @@ -0,0 +1,257 @@ +{{- if .Values.linux.enabled}} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.linux.dsName }} + namespace: {{ .Release.Namespace }} +{{ 
include "azuredisk.labels" . | indent 2 }} +{{- with .Values.linux.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.linux.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.linux.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.linux.dsName }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} +{{- with .Values.linux.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.linux.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.linux.hostNetwork }} + dnsPolicy: Default + serviceAccountName: {{ .Values.serviceAccount.node }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.linux.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.linux.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.linux.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.linux.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . | indent 6 }} + containers: + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ .Values.node.livenessProbe.healthPort }} + - --v=2 + resources: {{- toYaml .Values.linux.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 +{{- if .Values.linux.enableRegistrationProbe }} + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 +{{- end }} + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: {{- toYaml .Values.linux.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + 
args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--enable-perf-optimization={{ .Values.linux.enablePerfOptimization }}" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--get-node-info-from-labels={{ .Values.linux.getNodeInfoFromLabels }}" + - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" + - "--enable-otel-tracing={{ .Values.linux.otelTracing.enabled }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: {{ .Values.azureCredentialFileConfigMap }} + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + {{- if .Values.linux.otelTracing.enabled }} + - name: OTEL_SERVICE_NAME + value: {{ .Values.linux.otelTracing.otelServiceName }} + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: {{ .Values.linux.otelTracing.otelExporterEndpoint }} + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: {{ .Values.linux.kubelet }}/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /dev + name: device-dir + - mountPath: /sys/bus/scsi/devices + name: sys-devices-dir + - mountPath: /sys/class/ + name: sys-class + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.linux.resources.azuredisk | nindent 12 }} + volumes: + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }} + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: 
azure-cred + - hostPath: + path: /dev + type: Directory + name: device-dir + - hostPath: + path: /sys/bus/scsi/devices + type: Directory + name: sys-devices-dir + - hostPath: + path: /sys/class/ + type: Directory + name: sys-class + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} +{{- end -}} diff --git a/charts/v1.29.1/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml b/charts/v1.29.1/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml new file mode 100644 index 000000000..431a01b15 --- /dev/null +++ b/charts/v1.29.1/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml @@ -0,0 +1,94 @@ +{{- if .Values.snapshot.enabled -}} +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.snapshot.snapshotController.name }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.snapshot.snapshotController.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.snapshot.snapshotController.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.snapshot.snapshotController.replicas }} + selector: + matchLabels: + app: {{ .Values.snapshot.snapshotController.name }} + # the snapshot controller won't be marked as ready if the v1 CRDs are unavailable + # in #504 the snapshot-controller will exit after around 7.5 seconds if it + # can't find the v1 CRDs so this value should be greater than that + minReadySeconds: 15 + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.snapshot.snapshotController.name }} +{{- with .Values.snapshot.snapshotController.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.snapshot.snapshotController.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.snapshotController }} + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: {{ .Values.snapshot.snapshotController.name }} +{{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotController.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.tag }}" +{{- else }} + image: "{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.tag }}" +{{- end }} + args: + - "--v=2" + - "--leader-election=true" + - "--leader-election-namespace={{ .Release.Namespace }}" + resources: {{- toYaml .Values.snapshot.snapshotController.resources | nindent 12 }} + imagePullPolicy: {{ .Values.snapshot.image.csiSnapshotController.pullPolicy }} + +--- +{{- if .Values.snapshot.VolumeSnapshotClass.enabled -}} +kind: VolumeSnapshotClass +apiVersion: snapshot.storage.k8s.io/v1 +metadata: + name: {{ .Values.snapshot.VolumeSnapshotClass.name }} + annotations: + # This is what defines this resource as a hook. Without this line, the + # job is considered part of the release. + "helm.sh/hook": post-install +driver: {{ .Values.driver.name }} +deletionPolicy: {{ .Values.snapshot.VolumeSnapshotClass.deletionPolicy }} +parameters: + incremental: {{ .Values.snapshot.VolumeSnapshotClass.parameters.incremental }} + {{- if ne .Values.snapshot.VolumeSnapshotClass.parameters.resourceGroup "" }} + resourceGroup: {{ .Values.snapshot.VolumeSnapshotClass.parameters.resourceGroup }} + {{- end }} + tags: {{ .Values.snapshot.VolumeSnapshotClass.parameters.tags }} +{{- with .Values.snapshot.VolumeSnapshotClass.additionalLabels }} +additionalLabels: +{{ toYaml . | indent 2 }} +{{- end }} +{{- end -}} +{{- end -}} diff --git a/charts/v1.29.1/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml b/charts/v1.29.1/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..b78940fc6 --- /dev/null +++ b/charts/v1.29.1/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml @@ -0,0 +1,199 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-provisioner-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-provisioner-binding +{{ include "azuredisk.labels" . 
| indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-attacher-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-attacher-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-snapshotter-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-snapshotter-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-resizer-role +{{ include "azuredisk.labels" . 
| indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-resizer-role +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-controller-secret-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.29.1/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml b/charts/v1.29.1/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml new file mode 100644 index 000000000..ae7e080af --- /dev/null +++ b/charts/v1.29.1/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml @@ -0,0 +1,28 @@ +{{- if .Values.rbac.create -}} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-secret-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-node-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.29.1/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml b/charts/v1.29.1/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml new file mode 100644 index 000000000..8e4278b6f --- /dev/null +++ b/charts/v1.29.1/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml @@ -0,0 +1,72 @@ +{{- if and .Values.snapshot.enabled .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", 
"create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-role +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-leaderelection-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.29.1/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml b/charts/v1.29.1/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..1806e9c58 --- /dev/null +++ b/charts/v1.29.1/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v1.29.1/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml b/charts/v1.29.1/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml new file mode 100644 index 000000000..dcc2ff60b --- /dev/null +++ b/charts/v1.29.1/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . 
| indent 2 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v1.29.1/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml b/charts/v1.29.1/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml new file mode 100644 index 000000000..7cdaad0b4 --- /dev/null +++ b/charts/v1.29.1/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml @@ -0,0 +1,7 @@ +{{- if and .Values.snapshot.enabled .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/v1.29.1/azuredisk-csi-driver/values.yaml b/charts/v1.29.1/azuredisk-csi-driver/values.yaml new file mode 100644 index 000000000..40f09e0a1 --- /dev/null +++ b/charts/v1.29.1/azuredisk-csi-driver/values.yaml @@ -0,0 +1,289 @@ +image: + baseRepo: mcr.microsoft.com + azuredisk: + repository: /oss/kubernetes-csi/azuredisk-csi + tag: v1.29.1 + pullPolicy: IfNotPresent + csiProvisioner: + repository: /oss/kubernetes-csi/csi-provisioner + tag: v3.5.0 + pullPolicy: IfNotPresent + csiAttacher: + repository: /oss/kubernetes-csi/csi-attacher + tag: v4.3.0 + pullPolicy: IfNotPresent + csiResizer: + repository: /oss/kubernetes-csi/csi-resizer + tag: v1.8.0 + pullPolicy: IfNotPresent + livenessProbe: + repository: /oss/kubernetes-csi/livenessprobe + tag: v2.10.0 + pullPolicy: IfNotPresent + nodeDriverRegistrar: + repository: /oss/kubernetes-csi/csi-node-driver-registrar + tag: v2.8.0 + pullPolicy: IfNotPresent + +serviceAccount: + create: true # When true, service accounts will be created for you. Set to false if you want to use your own. 
+ controller: csi-azuredisk-controller-sa # Name of Service Account to be created or used + node: csi-azuredisk-node-sa # Name of Service Account to be created or used + snapshotController: csi-snapshot-controller-sa # Name of Service Account to be created or used + +rbac: + create: true + name: azuredisk + +controller: + name: csi-azuredisk-controller + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + allowEmptyCloudConfig: false + enableTrafficManager: false + trafficManagerPort: 7788 + replicas: 2 + metricsPort: 29604 + livenessProbe: + healthPort: 29602 + runOnMaster: false + runOnControlPlane: false + disableAvailabilitySetNodes: false + vmType: "" + provisionerWorkerThreads: 100 + attacherWorkerThreads: 1000 + vmssCacheTTLInSeconds: -1 + logLevel: 5 + otelTracing: + enabled: false + otelServiceName: csi-azuredisk-controller + otelExporterEndpoint: "http://localhost:4317" + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + hostNetwork: true # this setting could be disabled if controller does not depend on MSI setting + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + resources: + csiProvisioner: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiAttacher: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiResizer: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiSnapshotter: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + azuredisk: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + +node: + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + supportZone: true + allowEmptyCloudConfig: true + getNodeIDFromIMDS: false + maxUnavailable: 1 + logLevel: 5 + livenessProbe: + healthPort: 29603 + +snapshot: + enabled: false + name: csi-snapshot-controller + image: + csiSnapshotter: + repository: /oss/kubernetes-csi/csi-snapshotter + tag: v6.2.2 + pullPolicy: IfNotPresent + csiSnapshotController: + repository: /oss/kubernetes-csi/snapshot-controller + tag: v6.2.2 + pullPolicy: IfNotPresent + snapshotController: + name: csi-snapshot-controller + replicas: 2 + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + resources: + limits: + memory: 300Mi + requests: + cpu: 10m + memory: 20Mi + VolumeSnapshotClass: + enabled: false + name: csi-azuredisk-vsc + deletionPolicy: Delete + parameters: + incremental: '"true"' # available values: "true", "false" ("true" by default for Azure Public Cloud, and "false" by default for Azure Stack Cloud) + resourceGroup: "" # available values: EXISTING RESOURCE GROUP (If not specified, snapshot will be stored in the same resource group as source Azure disk) + tags: "" # tag format: 'key1=val1,key2=val2' + additionalLabels: {} + +feature: + enableFSGroupPolicy: true + +driver: + name: disk.csi.azure.com + # maximum number of attachable volumes per node, + # maximum number is defined according to node instance type by default(-1) + volumeAttachLimit: -1 + customUserAgent: "" + userAgentSuffix: "OSS-helm" + azureGoSDKLogLevel: "" # available values: ""(no logs), DEBUG, INFO, WARNING, ERROR + httpsProxy: "" + httpProxy: "" + 
noProxy: "" + +linux: + enabled: true + dsName: csi-azuredisk-node # daemonset name + kubelet: /var/lib/kubelet + distro: debian # available values: debian, fedora + enablePerfOptimization: true + enableRegistrationProbe: true + otelTracing: + enabled: false + otelServiceName: csi-azuredisk-node + otelExporterEndpoint: "http://localhost:4317" + tolerations: + - operator: "Exists" + hostNetwork: true # this setting could be disabled if perfProfile is `none` + getNodeInfoFromLabels: false # get node info from node labels instead of IMDS + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + resources: + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + nodeDriverRegistrar: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + azuredisk: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + +windows: + enabled: true + useHostProcessContainers: false + dsName: csi-azuredisk-node-win # daemonset name + kubelet: 'C:\var\lib\kubelet' + getNodeInfoFromLabels: false # get node info from node labels instead of IMDS + enableRegistrationProbe: true + otelTracing: + enabled: false + otelServiceName: csi-azuredisk-node-win + otelExporterEndpoint: "http://localhost:4317" + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + resources: + livenessProbe: + limits: + memory: 150Mi + requests: + cpu: 10m + memory: 40Mi + nodeDriverRegistrar: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + azuredisk: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi + +cloud: AzurePublicCloud + +## Reference to one or more secrets to be used when pulling images +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +## +imagePullSecrets: [] +# - name: "image-pull-secret" + +workloadIdentity: + clientID: "" + # [optional] If the AAD application or user-assigned managed identity is not in the same tenant as the cluster + # then set tenantID with the application or user-assigned managed identity tenant ID + tenantID: "" + +azureCredentialFileConfigMap: azure-cred-file diff --git a/charts/v1.29.2/azuredisk-csi-driver-v1.29.2.tgz b/charts/v1.29.2/azuredisk-csi-driver-v1.29.2.tgz new file mode 100644 index 000000000..10ec691ca Binary files /dev/null and b/charts/v1.29.2/azuredisk-csi-driver-v1.29.2.tgz differ diff --git a/charts/v1.29.2/azuredisk-csi-driver/Chart.yaml b/charts/v1.29.2/azuredisk-csi-driver/Chart.yaml new file mode 100644 index 000000000..70f7cf331 --- /dev/null +++ b/charts/v1.29.2/azuredisk-csi-driver/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: v1.29.2 +description: Azure disk Container Storage Interface (CSI) Storage Plugin +name: azuredisk-csi-driver +version: v1.29.2 diff --git a/charts/v1.29.2/azuredisk-csi-driver/templates/NOTES.txt b/charts/v1.29.2/azuredisk-csi-driver/templates/NOTES.txt new file mode 100644 index 000000000..6a9695dad --- /dev/null +++ b/charts/v1.29.2/azuredisk-csi-driver/templates/NOTES.txt @@ -0,0 +1,5 @@ +The Azure Disk CSI 
Driver is getting deployed to your cluster. + +To check Azure Disk CSI Driver pods status, please run: + + kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch diff --git a/charts/v1.29.2/azuredisk-csi-driver/templates/_helpers.tpl b/charts/v1.29.2/azuredisk-csi-driver/templates/_helpers.tpl new file mode 100644 index 000000000..51fcc8a09 --- /dev/null +++ b/charts/v1.29.2/azuredisk-csi-driver/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* Expand the name of the chart.*/}} +{{- define "azuredisk.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* labels for helm resources */}} +{{- define "azuredisk.labels" -}} +labels: + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" + app.kubernetes.io/name: "{{ template "azuredisk.name" . }}" + app.kubernetes.io/version: "{{ .Chart.AppVersion }}" + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" +{{- end -}} + +{{/* pull secrets for containers */}} +{{- define "azuredisk.pullSecrets" -}} +{{- if .Values.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- end }} +{{- end -}} \ No newline at end of file diff --git a/charts/v1.29.2/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml b/charts/v1.29.2/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml new file mode 100644 index 000000000..76df8af7e --- /dev/null +++ b/charts/v1.29.2/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml @@ -0,0 +1,840 @@ +{{- if .Values.snapshot.enabled -}} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + shortNames: + - vs + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of + the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing + VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from + this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot + object intends to bind to. Please note that verification of binding actually + requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure + both are pointing at each other. Binding MUST be verified prior to usage of + this object. 
+ jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying + storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object representing the volume from which + a snapshot should be created. This PVC is assumed to be in the + same namespace as the VolumeSnapshot object. This field should + be set if the snapshot does not exists, and needs to be created. + This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a + pre-existing VolumeSnapshotContent object representing an existing + volume snapshot. This field should be set if the snapshot already + exists and only needs a representation in Kubernetes. This field + is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. VolumeSnapshotClassName may be + left nil to indicate that the default SnapshotClass should be used. + A given cluster may have multiple default Volume SnapshotClasses: + one default per CSI Driver. If a VolumeSnapshot does not specify + a SnapshotClass, VolumeSnapshotSource will be checked to figure + out what the associated CSI Driver is, and the default VolumeSnapshotClass + associated with that CSI Driver will be used. If more than one VolumeSnapshotClass + exist for a given CSI Driver and more than one have been marked + as default, CreateSnapshot will fail and generate an event. Empty + string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. 
+ Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent + objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent + object to which this VolumeSnapshot object intends to bind to. If + not specified, it indicates that the VolumeSnapshot object has not + been successfully bound to a VolumeSnapshotContent object yet. NOTE: + To avoid possible security issues, consumers must verify binding + between VolumeSnapshot and VolumeSnapshotContent objects is successful + (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the snapshot controller + with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it may indicate + that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. The snapshot controller will keep retrying when an error + occurs during the snapshot creation. Upon success, this error field + will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the snapshot controller with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required + to create a volume from this snapshot. In dynamic snapshot creation + case, this field will be filled in by the snapshot controller with + the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. + For a pre-existing snapshot, this field will be filled with the + "size_bytes" value returned from the CSI "ListSnapshots" gRPC call + if the driver supports it. When restoring a volume from this snapshot, + the size of the volume MUST NOT be smaller than the restoreSize + if it is specified, otherwise the restoration will fail. If not + specified, it indicates that the size is unknown. 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. 
+ properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during the snapshot creation. Upon success, this error field will be cleared. 
+ properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + shortNames: + - vsclass + - vsclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the + VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent + created through the VolumeSnapshotClass should be deleted when its bound + VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". + "Retain" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. + Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. 
+ type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: false + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + shortNames: + - vsc + - vscs + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical + snapshot on the underlying storage system should be deleted when its bound + VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on + the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. + "Delete" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are deleted. For dynamically provisioned + snapshots, this field will automatically be filled in by the CSI + snapshotter sidecar with the "DeletionPolicy" field defined in the + corresponding VolumeSnapshotClass. For pre-existing snapshots, users + MUST specify this field when creating the VolumeSnapshotContent + object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be + the same as the name returned by the CSI GetPluginName() call for + that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) + dynamically provisioned or already exists, and just requires a Kubernetes + object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of + a pre-existing snapshot on the underlying storage system for + which a Kubernetes object representation was (or should be) + created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the + volume from which a snapshot should be dynamically taken from. + This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + sourceVolumeMode: + description: SourceVolumeMode is the mode of the volume whose snapshot + is taken. Can be either “Filesystem” or “Block”. If not specified, + it indicates the source volume's mode is unknown. This field is + immutable. This field is an alpha field. + type: string + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot + was (or will be) created. Note that after provisioning, the VolumeSnapshotClass + may be deleted or recreated with different set of values, and as + such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object + to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be + provided for binding to happen. This field is immutable after creation. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the CSI snapshotter + sidecar with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it indicates + the creation time is unknown. The format of this field is a Unix + nanoseconds time encoded as an int64. On Unix, the command `date + +%s%N` returns the current time in nanoseconds since 1970-01-01 + 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, + if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the CSI snapshotter sidecar with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. 
In dynamic snapshot creation case, this field will be + filled in by the CSI snapshotter sidecar with the "size_bytes" value + returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "size_bytes" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. When restoring a volume from this snapshot, the size of the + volume MUST NOT be smaller than the restoreSize if it is specified, + otherwise the restoration will fail. If not specified, it indicates + that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot + on the underlying storage system. If not specified, it indicates + that dynamic snapshot creation has either failed or it is still + in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. 
TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. 
When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +{{- end -}} diff --git a/charts/v1.29.2/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml b/charts/v1.29.2/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml new file mode 100644 index 000000000..f23ec8627 --- /dev/null +++ b/charts/v1.29.2/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml @@ -0,0 +1,288 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.controller.name }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.controller.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.controller.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.controller.replicas }} + selector: + matchLabels: + app: {{ .Values.controller.name }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.controller.name }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} +{{- with .Values.controller.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + serviceAccountName: {{ .Values.serviceAccount.controller }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.controller.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + {{- if .Values.controller.runOnMaster}} + node-role.kubernetes.io/master: "" + {{- end}} + {{- if .Values.controller.runOnControlPlane}} + node-role.kubernetes.io/control-plane: "" + {{- end}} + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: csi-provisioner +{{- if hasPrefix "/" .Values.image.csiProvisioner.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- else }} + image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- end }} + args: + - "--feature-gates=Topology=true,HonorPVReclaimPolicy=true" + - "--csi-address=$(ADDRESS)" + - "--v=2" + - "--timeout=30s" + - "--leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "--worker-threads={{ .Values.controller.provisionerWorkerThreads }}" + - "--extra-create-metadata=true" + - "--strict-topology=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} + - name: csi-attacher +{{- if hasPrefix "/" .Values.image.csiAttacher.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}" +{{- else }} + image: "{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}" +{{- end }} + args: + - "-v=2" + - "-csi-address=$(ADDRESS)" + - "-timeout=1200s" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "-worker-threads={{ .Values.controller.attacherWorkerThreads }}" + - "-kube-api-qps=200" + - "-kube-api-burst=400" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiAttacher | nindent 12 }} + - name: csi-snapshotter +{{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotter.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}" +{{- else }} + image: "{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "-v=2" + - "--timeout=1200s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiSnapshotter | nindent 12 }} + - name: csi-resizer +{{- if hasPrefix "/" .Values.image.csiResizer.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- else }} + image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - '-handle-volume-inuse-error=false' + - '-feature-gates=RecoverVolumeExpansionFailure=true' + - "-timeout=240s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} + - name: liveness-probe +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - 
--csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ .Values.controller.livenessProbe.healthPort }} + - --v=2 + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }} +{{- if eq .Values.controller.enableTrafficManager true }} + - image: mcr.microsoft.com/aks/ccp/ccp-auto-thrust:master.221118.2 + imagePullPolicy: IfNotPresent + name: proxy + command: + - /ccp-auto-thrust + args: + - "--port={{ .Values.controller.trafficManagerPort }}" + ports: + - containerPort: {{ .Values.controller.trafficManagerPort }} + protocol: TCP +{{- end }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.controller.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}" + - "--disable-avset-nodes={{ .Values.controller.disableAvailabilitySetNodes }}" + - "--vm-type={{ .Values.controller.vmType }}" + - "--drivername={{ .Values.driver.name }}" + - "--cloud-config-secret-name={{ .Values.controller.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.controller.allowEmptyCloudConfig }}" + - "--vmss-cache-ttl-seconds={{ .Values.controller.vmssCacheTTLInSeconds }}" + - "--enable-traffic-manager={{ .Values.controller.enableTrafficManager }}" + - "--traffic-manager-port={{ .Values.controller.trafficManagerPort }}" + - "--enable-otel-tracing={{ .Values.controller.otelTracing.enabled }}" + ports: + - containerPort: {{ .Values.controller.livenessProbe.healthPort }} + name: healthz + protocol: TCP + - containerPort: {{ .Values.controller.metricsPort }} + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: {{ .Values.azureCredentialFileConfigMap }} + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + {{- if .Values.controller.otelTracing.enabled }} + - name: OTEL_SERVICE_NAME + value: {{ .Values.controller.otelTracing.otelServiceName }} + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: {{ .Values.controller.otelTracing.otelExporterEndpoint }} + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: 
ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.controller.resources.azuredisk | nindent 12 }} + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} diff --git a/charts/v1.29.2/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml b/charts/v1.29.2/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml new file mode 100644 index 000000000..21f941748 --- /dev/null +++ b/charts/v1.29.2/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml @@ -0,0 +1,13 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: {{ .Values.driver.name }} + annotations: + csiDriver: "{{ .Values.image.azuredisk.tag }}" + snapshot: "{{ .Values.snapshot.image.csiSnapshotter.tag }}" +spec: + attachRequired: true + podInfoOnMount: false + {{- if .Values.feature.enableFSGroupPolicy}} + fsGroupPolicy: File + {{- end}} diff --git a/charts/v1.29.2/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml b/charts/v1.29.2/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml new file mode 100644 index 000000000..73513e0d5 --- /dev/null +++ b/charts/v1.29.2/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml @@ -0,0 +1,161 @@ +{{- if and (.Values.windows.enabled) (eq .Values.windows.useHostProcessContainers true) }} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.windows.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.windows.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.windows.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.windows.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.windows.dsName }} +{{- with .Values.windows.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.windows.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.node }} +{{- with .Values.windows.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + nodeSelector: + kubernetes.io/os: windows +{{- with .Values.windows.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.windows.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.windows.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + securityContext: + seccompProfile: + type: RuntimeDefault + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + initContainers: + - name: init +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + command: + - "powershell.exe" + - "-c" + - "New-Item -ItemType Directory -Path C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\ -Force" + containers: + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + command: + - "csi-node-driver-registrar.exe" + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--plugin-registration-path=$(PLUGIN_REG_DIR)" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://{{ .Values.windows.kubelet }}\plugins\{{ .Values.driver.name }}\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + - name: PLUGIN_REG_DIR + value: C:\\var\\lib\\kubelet\\plugins_registry\\ + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }} + resources: {{- toYaml .Values.windows.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- end }} + command: + - "azurediskplugin.exe" + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--enable-windows-host-process=true" + - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" + - "--enable-otel-tracing={{ .Values.windows.otelTracing.enabled }}" + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: {{ .Values.azureCredentialFileConfigMap }} + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne 
.Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: C:\k\azurestackcloud.json + {{- end }} + {{- if .Values.windows.otelTracing.enabled }} + - name: OTEL_SERVICE_NAME + value: {{ .Values.windows.otelTracing.otelServiceName }} + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: {{ .Values.windows.otelTracing.otelExporterEndpoint }} + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} +{{- end -}} diff --git a/charts/v1.29.2/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml b/charts/v1.29.2/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml new file mode 100644 index 000000000..cb0e2c7cf --- /dev/null +++ b/charts/v1.29.2/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml @@ -0,0 +1,245 @@ +{{- if and (.Values.windows.enabled) (ne .Values.windows.useHostProcessContainers true) }} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.windows.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.windows.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.windows.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.windows.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.windows.dsName }} +{{- with .Values.windows.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.windows.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.node }} +{{- with .Values.windows.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + nodeSelector: + kubernetes.io/os: windows +{{- with .Values.windows.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.windows.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.windows.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: liveness-probe + volumeMounts: + - mountPath: C:\csi + name: plugin-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port={{ .Values.node.livenessProbe.healthPort }}" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} + resources: {{- toYaml .Values.windows.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" +{{- if .Values.windows.enableRegistrationProbe }} + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 60 + timeoutSeconds: 30 +{{- end }} + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }} + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: registration-dir + mountPath: C:\registration + resources: {{- toYaml .Values.windows.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--get-node-info-from-labels={{ .Values.windows.getNodeInfoFromLabels }}" + - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" + - "--enable-otel-tracing={{ .Values.windows.otelTracing.enabled }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + 
valueFrom: + configMapKeyRef: + name: {{ .Values.azureCredentialFileConfigMap }} + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: C:\k\azurestackcloud.json + {{- end }} + {{- if .Values.windows.otelTracing.enabled }} + - name: OTEL_SERVICE_NAME + value: {{ .Values.windows.otelTracing.otelServiceName }} + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: {{ .Values.windows.otelTracing.otelExporterEndpoint }} + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: azure-config + mountPath: C:\k + - name: csi-proxy-fs-pipe-v1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + mountPath: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + mountPath: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-volume-v1beta2 + resources: {{- toYaml .Values.windows.resources.azuredisk | nindent 12 }} + volumes: + - name: csi-proxy-fs-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1beta2 + - name: registration-dir + hostPath: + path: {{ .Values.windows.kubelet }}\plugins_registry\ + type: Directory + - name: kubelet-dir + hostPath: + path: {{ .Values.windows.kubelet }}\ + type: Directory + - name: plugin-dir + hostPath: + path: {{ .Values.windows.kubelet }}\plugins\{{ .Values.driver.name }}\ + type: DirectoryOrCreate + - name: azure-config + hostPath: + path: C:\k + type: DirectoryOrCreate +{{- end -}} diff --git a/charts/v1.29.2/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml b/charts/v1.29.2/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml new file mode 100644 index 000000000..e9cab38ce --- /dev/null +++ b/charts/v1.29.2/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml @@ -0,0 +1,257 @@ +{{- if .Values.linux.enabled}} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.linux.dsName }} + namespace: {{ .Release.Namespace }} +{{ 
include "azuredisk.labels" . | indent 2 }} +{{- with .Values.linux.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.linux.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.linux.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.linux.dsName }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} +{{- with .Values.linux.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.linux.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.linux.hostNetwork }} + dnsPolicy: Default + serviceAccountName: {{ .Values.serviceAccount.node }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.linux.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.linux.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.linux.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.linux.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . | indent 6 }} + containers: + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ .Values.node.livenessProbe.healthPort }} + - --v=2 + resources: {{- toYaml .Values.linux.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 +{{- if .Values.linux.enableRegistrationProbe }} + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 +{{- end }} + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: {{- toYaml .Values.linux.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + 
args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--enable-perf-optimization={{ .Values.linux.enablePerfOptimization }}" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--get-node-info-from-labels={{ .Values.linux.getNodeInfoFromLabels }}" + - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" + - "--enable-otel-tracing={{ .Values.linux.otelTracing.enabled }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: {{ .Values.azureCredentialFileConfigMap }} + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + {{- if .Values.linux.otelTracing.enabled }} + - name: OTEL_SERVICE_NAME + value: {{ .Values.linux.otelTracing.otelServiceName }} + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: {{ .Values.linux.otelTracing.otelExporterEndpoint }} + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: {{ .Values.linux.kubelet }}/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /dev + name: device-dir + - mountPath: /sys/bus/scsi/devices + name: sys-devices-dir + - mountPath: /sys/class/ + name: sys-class + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.linux.resources.azuredisk | nindent 12 }} + volumes: + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }} + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: 
azure-cred + - hostPath: + path: /dev + type: Directory + name: device-dir + - hostPath: + path: /sys/bus/scsi/devices + type: Directory + name: sys-devices-dir + - hostPath: + path: /sys/class/ + type: Directory + name: sys-class + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} +{{- end -}} diff --git a/charts/v1.29.2/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml b/charts/v1.29.2/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml new file mode 100644 index 000000000..431a01b15 --- /dev/null +++ b/charts/v1.29.2/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml @@ -0,0 +1,94 @@ +{{- if .Values.snapshot.enabled -}} +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.snapshot.snapshotController.name }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.snapshot.snapshotController.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.snapshot.snapshotController.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.snapshot.snapshotController.replicas }} + selector: + matchLabels: + app: {{ .Values.snapshot.snapshotController.name }} + # the snapshot controller won't be marked as ready if the v1 CRDs are unavailable + # in #504 the snapshot-controller will exit after around 7.5 seconds if it + # can't find the v1 CRDs so this value should be greater than that + minReadySeconds: 15 + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.snapshot.snapshotController.name }} +{{- with .Values.snapshot.snapshotController.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.snapshot.snapshotController.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.snapshotController }} + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: {{ .Values.snapshot.snapshotController.name }} +{{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotController.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.tag }}" +{{- else }} + image: "{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.tag }}" +{{- end }} + args: + - "--v=2" + - "--leader-election=true" + - "--leader-election-namespace={{ .Release.Namespace }}" + resources: {{- toYaml .Values.snapshot.snapshotController.resources | nindent 12 }} + imagePullPolicy: {{ .Values.snapshot.image.csiSnapshotController.pullPolicy }} + +--- +{{- if .Values.snapshot.VolumeSnapshotClass.enabled -}} +kind: VolumeSnapshotClass +apiVersion: snapshot.storage.k8s.io/v1 +metadata: + name: {{ .Values.snapshot.VolumeSnapshotClass.name }} + annotations: + # This is what defines this resource as a hook. Without this line, the + # job is considered part of the release. + "helm.sh/hook": post-install +driver: {{ .Values.driver.name }} +deletionPolicy: {{ .Values.snapshot.VolumeSnapshotClass.deletionPolicy }} +parameters: + incremental: {{ .Values.snapshot.VolumeSnapshotClass.parameters.incremental }} + {{- if ne .Values.snapshot.VolumeSnapshotClass.parameters.resourceGroup "" }} + resourceGroup: {{ .Values.snapshot.VolumeSnapshotClass.parameters.resourceGroup }} + {{- end }} + tags: {{ .Values.snapshot.VolumeSnapshotClass.parameters.tags }} +{{- with .Values.snapshot.VolumeSnapshotClass.additionalLabels }} +additionalLabels: +{{ toYaml . | indent 2 }} +{{- end }} +{{- end -}} +{{- end -}} diff --git a/charts/v1.29.2/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml b/charts/v1.29.2/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..b78940fc6 --- /dev/null +++ b/charts/v1.29.2/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml @@ -0,0 +1,199 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-provisioner-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-provisioner-binding +{{ include "azuredisk.labels" . 
| indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-attacher-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-attacher-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-snapshotter-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-snapshotter-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-resizer-role +{{ include "azuredisk.labels" . 
| indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-resizer-role +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-controller-secret-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.29.2/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml b/charts/v1.29.2/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml new file mode 100644 index 000000000..ae7e080af --- /dev/null +++ b/charts/v1.29.2/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml @@ -0,0 +1,28 @@ +{{- if .Values.rbac.create -}} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-secret-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-node-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.29.2/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml b/charts/v1.29.2/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml new file mode 100644 index 000000000..8e4278b6f --- /dev/null +++ b/charts/v1.29.2/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml @@ -0,0 +1,72 @@ +{{- if and .Values.snapshot.enabled .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", 
"create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-role +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-leaderelection-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.29.2/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml b/charts/v1.29.2/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..1806e9c58 --- /dev/null +++ b/charts/v1.29.2/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v1.29.2/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml b/charts/v1.29.2/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml new file mode 100644 index 000000000..dcc2ff60b --- /dev/null +++ b/charts/v1.29.2/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . 
| indent 2 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v1.29.2/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml b/charts/v1.29.2/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml new file mode 100644 index 000000000..7cdaad0b4 --- /dev/null +++ b/charts/v1.29.2/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml @@ -0,0 +1,7 @@ +{{- if and .Values.snapshot.enabled .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/v1.29.2/azuredisk-csi-driver/values.yaml b/charts/v1.29.2/azuredisk-csi-driver/values.yaml new file mode 100644 index 000000000..7176a4f29 --- /dev/null +++ b/charts/v1.29.2/azuredisk-csi-driver/values.yaml @@ -0,0 +1,289 @@ +image: + baseRepo: mcr.microsoft.com + azuredisk: + repository: /oss/kubernetes-csi/azuredisk-csi + tag: v1.29.2 + pullPolicy: IfNotPresent + csiProvisioner: + repository: /oss/kubernetes-csi/csi-provisioner + tag: v3.5.0 + pullPolicy: IfNotPresent + csiAttacher: + repository: /oss/kubernetes-csi/csi-attacher + tag: v4.3.0 + pullPolicy: IfNotPresent + csiResizer: + repository: /oss/kubernetes-csi/csi-resizer + tag: v1.8.0 + pullPolicy: IfNotPresent + livenessProbe: + repository: /oss/kubernetes-csi/livenessprobe + tag: v2.10.0 + pullPolicy: IfNotPresent + nodeDriverRegistrar: + repository: /oss/kubernetes-csi/csi-node-driver-registrar + tag: v2.8.0 + pullPolicy: IfNotPresent + +serviceAccount: + create: true # When true, service accounts will be created for you. Set to false if you want to use your own. 
+ controller: csi-azuredisk-controller-sa # Name of Service Account to be created or used + node: csi-azuredisk-node-sa # Name of Service Account to be created or used + snapshotController: csi-snapshot-controller-sa # Name of Service Account to be created or used + +rbac: + create: true + name: azuredisk + +controller: + name: csi-azuredisk-controller + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + allowEmptyCloudConfig: false + enableTrafficManager: false + trafficManagerPort: 7788 + replicas: 2 + metricsPort: 29604 + livenessProbe: + healthPort: 29602 + runOnMaster: false + runOnControlPlane: false + disableAvailabilitySetNodes: false + vmType: "" + provisionerWorkerThreads: 100 + attacherWorkerThreads: 1000 + vmssCacheTTLInSeconds: -1 + logLevel: 5 + otelTracing: + enabled: false + otelServiceName: csi-azuredisk-controller + otelExporterEndpoint: "http://localhost:4317" + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + hostNetwork: true # this setting could be disabled if controller does not depend on MSI setting + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + resources: + csiProvisioner: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiAttacher: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiResizer: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiSnapshotter: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + azuredisk: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + +node: + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + supportZone: true + allowEmptyCloudConfig: true + getNodeIDFromIMDS: false + maxUnavailable: 1 + logLevel: 5 + livenessProbe: + healthPort: 29603 + +snapshot: + enabled: false + name: csi-snapshot-controller + image: + csiSnapshotter: + repository: /oss/kubernetes-csi/csi-snapshotter + tag: v6.3.1 + pullPolicy: IfNotPresent + csiSnapshotController: + repository: /oss/kubernetes-csi/snapshot-controller + tag: v6.3.1 + pullPolicy: IfNotPresent + snapshotController: + name: csi-snapshot-controller + replicas: 2 + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + resources: + limits: + memory: 300Mi + requests: + cpu: 10m + memory: 20Mi + VolumeSnapshotClass: + enabled: false + name: csi-azuredisk-vsc + deletionPolicy: Delete + parameters: + incremental: '"true"' # available values: "true", "false" ("true" by default for Azure Public Cloud, and "false" by default for Azure Stack Cloud) + resourceGroup: "" # available values: EXISTING RESOURCE GROUP (If not specified, snapshot will be stored in the same resource group as source Azure disk) + tags: "" # tag format: 'key1=val1,key2=val2' + additionalLabels: {} + +feature: + enableFSGroupPolicy: true + +driver: + name: disk.csi.azure.com + # maximum number of attachable volumes per node, + # maximum number is defined according to node instance type by default(-1) + volumeAttachLimit: -1 + customUserAgent: "" + userAgentSuffix: "OSS-helm" + azureGoSDKLogLevel: "" # available values: ""(no logs), DEBUG, INFO, WARNING, ERROR + httpsProxy: "" + httpProxy: "" + 
noProxy: "" + +linux: + enabled: true + dsName: csi-azuredisk-node # daemonset name + kubelet: /var/lib/kubelet + distro: debian # available values: debian, fedora + enablePerfOptimization: true + enableRegistrationProbe: true + otelTracing: + enabled: false + otelServiceName: csi-azuredisk-node + otelExporterEndpoint: "http://localhost:4317" + tolerations: + - operator: "Exists" + hostNetwork: true # this setting could be disabled if perfProfile is `none` + getNodeInfoFromLabels: false # get node info from node labels instead of IMDS + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + resources: + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + nodeDriverRegistrar: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + azuredisk: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + +windows: + enabled: true + useHostProcessContainers: false + dsName: csi-azuredisk-node-win # daemonset name + kubelet: 'C:\var\lib\kubelet' + getNodeInfoFromLabels: false # get node info from node labels instead of IMDS + enableRegistrationProbe: true + otelTracing: + enabled: false + otelServiceName: csi-azuredisk-node-win + otelExporterEndpoint: "http://localhost:4317" + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + resources: + livenessProbe: + limits: + memory: 150Mi + requests: + cpu: 10m + memory: 40Mi + nodeDriverRegistrar: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + azuredisk: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi + +cloud: AzurePublicCloud + +## Reference to one or more secrets to be used when pulling images +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +## +imagePullSecrets: [] +# - name: "image-pull-secret" + +workloadIdentity: + clientID: "" + # [optional] If the AAD application or user-assigned managed identity is not in the same tenant as the cluster + # then set tenantID with the application or user-assigned managed identity tenant ID + tenantID: "" + +azureCredentialFileConfigMap: azure-cred-file diff --git a/charts/v1.30.0/azuredisk-csi-driver-v1.30.0.tgz b/charts/v1.30.0/azuredisk-csi-driver-v1.30.0.tgz new file mode 100644 index 000000000..1e78bcc99 Binary files /dev/null and b/charts/v1.30.0/azuredisk-csi-driver-v1.30.0.tgz differ diff --git a/charts/v1.30.0/azuredisk-csi-driver/Chart.yaml b/charts/v1.30.0/azuredisk-csi-driver/Chart.yaml new file mode 100644 index 000000000..b60169fde --- /dev/null +++ b/charts/v1.30.0/azuredisk-csi-driver/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: v1.30.0 +description: Azure disk Container Storage Interface (CSI) Storage Plugin +name: azuredisk-csi-driver +version: v1.30.0 diff --git a/charts/v1.30.0/azuredisk-csi-driver/templates/NOTES.txt b/charts/v1.30.0/azuredisk-csi-driver/templates/NOTES.txt new file mode 100644 index 000000000..c198152ef --- /dev/null +++ b/charts/v1.30.0/azuredisk-csi-driver/templates/NOTES.txt @@ -0,0 +1,5 @@ +The Azure Disk CSI 
Driver is getting deployed to your cluster. + +To check Azure Disk CSI Driver pods status, please run: + + kubectl --namespace={{ .Release.Namespace }} get pods --selector="app.kubernetes.io/name={{ .Release.Name }}" --watch diff --git a/charts/v1.30.0/azuredisk-csi-driver/templates/_helpers.tpl b/charts/v1.30.0/azuredisk-csi-driver/templates/_helpers.tpl new file mode 100644 index 000000000..51fcc8a09 --- /dev/null +++ b/charts/v1.30.0/azuredisk-csi-driver/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* Expand the name of the chart.*/}} +{{- define "azuredisk.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* labels for helm resources */}} +{{- define "azuredisk.labels" -}} +labels: + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" + app.kubernetes.io/name: "{{ template "azuredisk.name" . }}" + app.kubernetes.io/version: "{{ .Chart.AppVersion }}" + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" +{{- end -}} + +{{/* pull secrets for containers */}} +{{- define "azuredisk.pullSecrets" -}} +{{- if .Values.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- end }} +{{- end -}} \ No newline at end of file diff --git a/charts/v1.30.0/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml b/charts/v1.30.0/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml new file mode 100644 index 000000000..76df8af7e --- /dev/null +++ b/charts/v1.30.0/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml @@ -0,0 +1,840 @@ +{{- if .Values.snapshot.enabled -}} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + shortNames: + - vs + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of + the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing + VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from + this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot + object intends to bind to. Please note that verification of binding actually + requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure + both are pointing at each other. 
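Stepping back to the `_helpers.tpl` shown just above (the CRD manifest continues below): the `azuredisk.pullSecrets` helper ranges over `.Values.imagePullSecrets` and emits each entry as `- name: <entry>`, so it treats the list items as plain strings. A small sketch of the expected input and the rendered output, with a hypothetical secret name:

```yaml
# Values input (plain strings), as the azuredisk.pullSecrets helper iterates it:
imagePullSecrets:
  - my-registry-secret          # hypothetical pull-secret name
---
# What {{ include "azuredisk.pullSecrets" . }} renders from that input:
imagePullSecrets:
  - name: my-registry-secret
```

Note that the controller Deployment template later in this chart appears to embed the same value directly through `toYaml`, which would instead expect the `- name: ...` map form hinted at in the values file, so it may be worth double-checking which form the consuming template expects before setting this value.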
Binding MUST be verified prior to usage of + this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying + storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object representing the volume from which + a snapshot should be created. This PVC is assumed to be in the + same namespace as the VolumeSnapshot object. This field should + be set if the snapshot does not exists, and needs to be created. + This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a + pre-existing VolumeSnapshotContent object representing an existing + volume snapshot. This field should be set if the snapshot already + exists and only needs a representation in Kubernetes. This field + is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. VolumeSnapshotClassName may be + left nil to indicate that the default SnapshotClass should be used. + A given cluster may have multiple default Volume SnapshotClasses: + one default per CSI Driver. If a VolumeSnapshot does not specify + a SnapshotClass, VolumeSnapshotSource will be checked to figure + out what the associated CSI Driver is, and the default VolumeSnapshotClass + associated with that CSI Driver will be used. If more than one VolumeSnapshotClass + exist for a given CSI Driver and more than one have been marked + as default, CreateSnapshot will fail and generate an event. Empty + string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. 
+ Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent + objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent + object to which this VolumeSnapshot object intends to bind to. If + not specified, it indicates that the VolumeSnapshot object has not + been successfully bound to a VolumeSnapshotContent object yet. NOTE: + To avoid possible security issues, consumers must verify binding + between VolumeSnapshot and VolumeSnapshotContent objects is successful + (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the snapshot controller + with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it may indicate + that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. The snapshot controller will keep retrying when an error + occurs during the snapshot creation. Upon success, this error field + will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the snapshot controller with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required + to create a volume from this snapshot. In dynamic snapshot creation + case, this field will be filled in by the snapshot controller with + the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. + For a pre-existing snapshot, this field will be filled with the + "size_bytes" value returned from the CSI "ListSnapshots" gRPC call + if the driver supports it. When restoring a volume from this snapshot, + the size of the volume MUST NOT be smaller than the restoreSize + if it is specified, otherwise the restoration will fail. If not + specified, it indicates that the size is unknown. 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. 
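With the v1 `VolumeSnapshot` schema above in place (and `snapshot.enabled` plus `snapshot.VolumeSnapshotClass.enabled` turned on in the values file), a dynamic snapshot request is a small namespaced object. In the sketch below the PVC name is a placeholder and the class name is the chart's default:

```yaml
# Illustrative dynamic snapshot request against the v1 schema above.
# "pvc-azuredisk" is a placeholder PVC name; csi-azuredisk-vsc is the chart's
# default VolumeSnapshotClass name (snapshot.VolumeSnapshotClass.name).
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: azuredisk-snapshot
spec:
  volumeSnapshotClassName: csi-azuredisk-vsc
  source:
    persistentVolumeClaimName: pvc-azuredisk   # exactly one of the oneOf sources may be set
```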
+ properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during the snapshot creation. Upon success, this error field will be cleared. 
+ properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + shortNames: + - vsclass + - vsclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the + VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent + created through the VolumeSnapshotClass should be deleted when its bound + VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". + "Retain" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. + Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. 
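For reference, the chart's `snapshot.VolumeSnapshotClass` defaults map to roughly the following object against the v1 `VolumeSnapshotClass` schema above; any labels or annotations added by the template are omitted here.

```yaml
# Roughly what the chart's snapshot.VolumeSnapshotClass defaults render to
# (template-added labels/annotations omitted).
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: csi-azuredisk-vsc
driver: disk.csi.azure.com
deletionPolicy: Delete
parameters:
  incremental: "true"     # defaults to "false" on Azure Stack Cloud
```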
+ type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: false + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + shortNames: + - vsc + - vscs + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical + snapshot on the underlying storage system should be deleted when its bound + VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on + the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. + "Delete" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are deleted. For dynamically provisioned + snapshots, this field will automatically be filled in by the CSI + snapshotter sidecar with the "DeletionPolicy" field defined in the + corresponding VolumeSnapshotClass. For pre-existing snapshots, users + MUST specify this field when creating the VolumeSnapshotContent + object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be + the same as the name returned by the CSI GetPluginName() call for + that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) + dynamically provisioned or already exists, and just requires a Kubernetes + object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of + a pre-existing snapshot on the underlying storage system for + which a Kubernetes object representation was (or should be) + created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the + volume from which a snapshot should be dynamically taken from. + This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + sourceVolumeMode: + description: SourceVolumeMode is the mode of the volume whose snapshot + is taken. Can be either “Filesystem” or “Block”. If not specified, + it indicates the source volume's mode is unknown. This field is + immutable. This field is an alpha field. + type: string + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot + was (or will be) created. Note that after provisioning, the VolumeSnapshotClass + may be deleted or recreated with different set of values, and as + such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object + to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be + provided for binding to happen. This field is immutable after creation. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the CSI snapshotter + sidecar with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it indicates + the creation time is unknown. The format of this field is a Unix + nanoseconds time encoded as an int64. On Unix, the command `date + +%s%N` returns the current time in nanoseconds since 1970-01-01 + 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, + if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the CSI snapshotter sidecar with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. 
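The `source.snapshotHandle` and `volumeSnapshotRef` fields described above are what an administrator fills in by hand to import a pre-existing Azure snapshot. A sketch follows, where the snapshot resource ID, object names, and namespace are all placeholders; a companion `VolumeSnapshot` for it appears after the v1beta1 schema further below.

```yaml
# Illustrative pre-provisioned snapshot content. The snapshotHandle is a
# placeholder Azure snapshot resource ID; subscription, resource group, and
# object names are hypothetical.
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotContent
metadata:
  name: manually-imported-content
spec:
  driver: disk.csi.azure.com
  deletionPolicy: Retain            # keep the Azure snapshot even if this object is deleted
  source:
    snapshotHandle: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg/providers/Microsoft.Compute/snapshots/example-snapshot
  volumeSnapshotRef:
    name: imported-snapshot         # the VolumeSnapshot that will bind to this content
    namespace: default
```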
In dynamic snapshot creation case, this field will be + filled in by the CSI snapshotter sidecar with the "size_bytes" value + returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "size_bytes" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. When restoring a volume from this snapshot, the size of the + volume MUST NOT be smaller than the restoreSize if it is specified, + otherwise the restoration will fail. If not specified, it indicates + that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot + on the underlying storage system. If not specified, it indicates + that dynamic snapshot creation has either failed or it is still + in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. 
TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. 
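Completing the pre-provisioned flow sketched earlier, the user-facing `VolumeSnapshot` points back at the content object through `source.volumeSnapshotContentName`, establishing the bidirectional binding that the descriptions above tell consumers to verify before use. Names continue the placeholders from that sketch.

```yaml
# Companion to the VolumeSnapshotContent sketch above: the namespaced,
# user-facing object that binds to it. Names are the same placeholders.
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: imported-snapshot
  namespace: default
spec:
  source:
    volumeSnapshotContentName: manually-imported-content
```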
When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +{{- end -}} diff --git a/charts/v1.30.0/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml b/charts/v1.30.0/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml new file mode 100644 index 000000000..b0274d020 --- /dev/null +++ b/charts/v1.30.0/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml @@ -0,0 +1,289 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.controller.name }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.controller.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.controller.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.controller.replicas }} + selector: + matchLabels: + app: {{ .Values.controller.name }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.controller.name }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} +{{- with .Values.controller.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + serviceAccountName: {{ .Values.serviceAccount.controller }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.controller.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + {{- if .Values.controller.runOnMaster}} + node-role.kubernetes.io/master: "" + {{- end}} + {{- if .Values.controller.runOnControlPlane}} + node-role.kubernetes.io/control-plane: "" + {{- end}} + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: csi-provisioner +{{- if hasPrefix "/" .Values.image.csiProvisioner.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- else }} + image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- end }} + args: + - "--feature-gates=Topology=true,HonorPVReclaimPolicy=true" + - "--csi-address=$(ADDRESS)" + - "--v=2" + - "--timeout=30s" + - "--leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "--worker-threads={{ .Values.controller.provisionerWorkerThreads }}" + - "--extra-create-metadata=true" + - "--strict-topology=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} + - name: csi-attacher +{{- if hasPrefix "/" .Values.image.csiAttacher.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}" +{{- else }} + image: "{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}" +{{- end }} + args: + - "-v=2" + - "-csi-address=$(ADDRESS)" + - "-timeout=1200s" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "-worker-threads={{ .Values.controller.attacherWorkerThreads }}" + - "-kube-api-qps=200" + - "-kube-api-burst=400" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiAttacher | nindent 12 }} + - name: csi-snapshotter +{{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotter.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}" +{{- else }} + image: "{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "-v=2" + - "--timeout=1200s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiSnapshotter | nindent 12 }} + - name: csi-resizer +{{- if hasPrefix "/" .Values.image.csiResizer.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- else }} + image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - '-handle-volume-inuse-error=false' + - '-feature-gates=RecoverVolumeExpansionFailure=true' + - "-timeout=240s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} + - name: liveness-probe +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - 
--csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ .Values.controller.livenessProbe.healthPort }} + - --v=2 + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }} +{{- if eq .Values.controller.enableTrafficManager true }} + - image: mcr.microsoft.com/aks/ccp/ccp-auto-thrust:master.221118.2 + imagePullPolicy: IfNotPresent + name: proxy + command: + - /ccp-auto-thrust + args: + - "--port={{ .Values.controller.trafficManagerPort }}" + ports: + - containerPort: {{ .Values.controller.trafficManagerPort }} + protocol: TCP +{{- end }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.controller.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}" + - "--disable-avset-nodes={{ .Values.controller.disableAvailabilitySetNodes }}" + - "--vm-type={{ .Values.controller.vmType }}" + - "--drivername={{ .Values.driver.name }}" + - "--cloud-config-secret-name={{ .Values.controller.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.controller.allowEmptyCloudConfig }}" + - "--vmss-cache-ttl-seconds={{ .Values.controller.vmssCacheTTLInSeconds }}" + - "--enable-traffic-manager={{ .Values.controller.enableTrafficManager }}" + - "--traffic-manager-port={{ .Values.controller.trafficManagerPort }}" + - "--enable-otel-tracing={{ .Values.controller.otelTracing.enabled }}" + - "--check-disk-lun-collision=true" + ports: + - containerPort: {{ .Values.controller.livenessProbe.healthPort }} + name: healthz + protocol: TCP + - containerPort: {{ .Values.controller.metricsPort }} + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: {{ .Values.azureCredentialFileConfigMap }} + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + {{- if .Values.controller.otelTracing.enabled }} + - name: OTEL_SERVICE_NAME + value: {{ .Values.controller.otelTracing.otelServiceName }} + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: {{ .Values.controller.otelTracing.otelExporterEndpoint }} + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + {{- if eq 
.Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.controller.resources.azuredisk | nindent 12 }} + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} diff --git a/charts/v1.30.0/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml b/charts/v1.30.0/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml new file mode 100644 index 000000000..21f941748 --- /dev/null +++ b/charts/v1.30.0/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml @@ -0,0 +1,13 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: {{ .Values.driver.name }} + annotations: + csiDriver: "{{ .Values.image.azuredisk.tag }}" + snapshot: "{{ .Values.snapshot.image.csiSnapshotter.tag }}" +spec: + attachRequired: true + podInfoOnMount: false + {{- if .Values.feature.enableFSGroupPolicy}} + fsGroupPolicy: File + {{- end}} diff --git a/charts/v1.30.0/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml b/charts/v1.30.0/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml new file mode 100644 index 000000000..73513e0d5 --- /dev/null +++ b/charts/v1.30.0/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml @@ -0,0 +1,161 @@ +{{- if and (.Values.windows.enabled) (eq .Values.windows.useHostProcessContainers true) }} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.windows.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.windows.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.windows.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.windows.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.windows.dsName }} +{{- with .Values.windows.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.windows.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.node }} +{{- with .Values.windows.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + nodeSelector: + kubernetes.io/os: windows +{{- with .Values.windows.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.windows.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.windows.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + securityContext: + seccompProfile: + type: RuntimeDefault + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + initContainers: + - name: init +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + command: + - "powershell.exe" + - "-c" + - "New-Item -ItemType Directory -Path C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\ -Force" + containers: + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + command: + - "csi-node-driver-registrar.exe" + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--plugin-registration-path=$(PLUGIN_REG_DIR)" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://{{ .Values.windows.kubelet }}\plugins\{{ .Values.driver.name }}\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + - name: PLUGIN_REG_DIR + value: C:\\var\\lib\\kubelet\\plugins_registry\\ + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }} + resources: {{- toYaml .Values.windows.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- end }} + command: + - "azurediskplugin.exe" + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--enable-windows-host-process=true" + - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" + - "--enable-otel-tracing={{ .Values.windows.otelTracing.enabled }}" + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: {{ .Values.azureCredentialFileConfigMap }} + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne 
.Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: C:\k\azurestackcloud.json + {{- end }} + {{- if .Values.windows.otelTracing.enabled }} + - name: OTEL_SERVICE_NAME + value: {{ .Values.windows.otelTracing.otelServiceName }} + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: {{ .Values.windows.otelTracing.otelExporterEndpoint }} + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} +{{- end -}} diff --git a/charts/v1.30.0/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml b/charts/v1.30.0/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml new file mode 100644 index 000000000..cb0e2c7cf --- /dev/null +++ b/charts/v1.30.0/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml @@ -0,0 +1,245 @@ +{{- if and (.Values.windows.enabled) (ne .Values.windows.useHostProcessContainers true) }} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.windows.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.windows.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.windows.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.windows.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.windows.dsName }} +{{- with .Values.windows.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.windows.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.node }} +{{- with .Values.windows.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + nodeSelector: + kubernetes.io/os: windows +{{- with .Values.windows.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.windows.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.windows.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: liveness-probe + volumeMounts: + - mountPath: C:\csi + name: plugin-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port={{ .Values.node.livenessProbe.healthPort }}" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} + resources: {{- toYaml .Values.windows.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" +{{- if .Values.windows.enableRegistrationProbe }} + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 60 + timeoutSeconds: 30 +{{- end }} + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }} + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: registration-dir + mountPath: C:\registration + resources: {{- toYaml .Values.windows.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--get-node-info-from-labels={{ .Values.windows.getNodeInfoFromLabels }}" + - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" + - "--enable-otel-tracing={{ .Values.windows.otelTracing.enabled }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + 
valueFrom: + configMapKeyRef: + name: {{ .Values.azureCredentialFileConfigMap }} + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: C:\k\azurestackcloud.json + {{- end }} + {{- if .Values.windows.otelTracing.enabled }} + - name: OTEL_SERVICE_NAME + value: {{ .Values.windows.otelTracing.otelServiceName }} + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: {{ .Values.windows.otelTracing.otelExporterEndpoint }} + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: azure-config + mountPath: C:\k + - name: csi-proxy-fs-pipe-v1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + mountPath: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + mountPath: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-volume-v1beta2 + resources: {{- toYaml .Values.windows.resources.azuredisk | nindent 12 }} + volumes: + - name: csi-proxy-fs-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1beta2 + - name: registration-dir + hostPath: + path: {{ .Values.windows.kubelet }}\plugins_registry\ + type: Directory + - name: kubelet-dir + hostPath: + path: {{ .Values.windows.kubelet }}\ + type: Directory + - name: plugin-dir + hostPath: + path: {{ .Values.windows.kubelet }}\plugins\{{ .Values.driver.name }}\ + type: DirectoryOrCreate + - name: azure-config + hostPath: + path: C:\k + type: DirectoryOrCreate +{{- end -}} diff --git a/charts/v1.30.0/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml b/charts/v1.30.0/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml new file mode 100644 index 000000000..e9cab38ce --- /dev/null +++ b/charts/v1.30.0/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml @@ -0,0 +1,257 @@ +{{- if .Values.linux.enabled}} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.linux.dsName }} + namespace: {{ .Release.Namespace }} +{{ 
include "azuredisk.labels" . | indent 2 }} +{{- with .Values.linux.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.linux.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.linux.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.linux.dsName }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} +{{- with .Values.linux.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.linux.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.linux.hostNetwork }} + dnsPolicy: Default + serviceAccountName: {{ .Values.serviceAccount.node }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.linux.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.linux.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.linux.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.linux.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . | indent 6 }} + containers: + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ .Values.node.livenessProbe.healthPort }} + - --v=2 + resources: {{- toYaml .Values.linux.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 +{{- if .Values.linux.enableRegistrationProbe }} + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 +{{- end }} + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: {{- toYaml .Values.linux.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + 
args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--enable-perf-optimization={{ .Values.linux.enablePerfOptimization }}" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--get-node-info-from-labels={{ .Values.linux.getNodeInfoFromLabels }}" + - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" + - "--enable-otel-tracing={{ .Values.linux.otelTracing.enabled }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: {{ .Values.azureCredentialFileConfigMap }} + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + {{- if .Values.linux.otelTracing.enabled }} + - name: OTEL_SERVICE_NAME + value: {{ .Values.linux.otelTracing.otelServiceName }} + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: {{ .Values.linux.otelTracing.otelExporterEndpoint }} + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: {{ .Values.linux.kubelet }}/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /dev + name: device-dir + - mountPath: /sys/bus/scsi/devices + name: sys-devices-dir + - mountPath: /sys/class/ + name: sys-class + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.linux.resources.azuredisk | nindent 12 }} + volumes: + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }} + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: 
azure-cred + - hostPath: + path: /dev + type: Directory + name: device-dir + - hostPath: + path: /sys/bus/scsi/devices + type: Directory + name: sys-devices-dir + - hostPath: + path: /sys/class/ + type: Directory + name: sys-class + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} +{{- end -}} diff --git a/charts/v1.30.0/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml b/charts/v1.30.0/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml new file mode 100644 index 000000000..431a01b15 --- /dev/null +++ b/charts/v1.30.0/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml @@ -0,0 +1,94 @@ +{{- if .Values.snapshot.enabled -}} +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.snapshot.snapshotController.name }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.snapshot.snapshotController.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.snapshot.snapshotController.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.snapshot.snapshotController.replicas }} + selector: + matchLabels: + app: {{ .Values.snapshot.snapshotController.name }} + # the snapshot controller won't be marked as ready if the v1 CRDs are unavailable + # in #504 the snapshot-controller will exit after around 7.5 seconds if it + # can't find the v1 CRDs so this value should be greater than that + minReadySeconds: 15 + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.snapshot.snapshotController.name }} +{{- with .Values.snapshot.snapshotController.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.snapshot.snapshotController.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.snapshotController }} + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: {{ .Values.snapshot.snapshotController.name }} +{{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotController.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.tag }}" +{{- else }} + image: "{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.tag }}" +{{- end }} + args: + - "--v=2" + - "--leader-election=true" + - "--leader-election-namespace={{ .Release.Namespace }}" + resources: {{- toYaml .Values.snapshot.snapshotController.resources | nindent 12 }} + imagePullPolicy: {{ .Values.snapshot.image.csiSnapshotController.pullPolicy }} + +--- +{{- if .Values.snapshot.VolumeSnapshotClass.enabled -}} +kind: VolumeSnapshotClass +apiVersion: snapshot.storage.k8s.io/v1 +metadata: + name: {{ .Values.snapshot.VolumeSnapshotClass.name }} + annotations: + # This is what defines this resource as a hook. Without this line, the + # job is considered part of the release. + "helm.sh/hook": post-install +driver: {{ .Values.driver.name }} +deletionPolicy: {{ .Values.snapshot.VolumeSnapshotClass.deletionPolicy }} +parameters: + incremental: {{ .Values.snapshot.VolumeSnapshotClass.parameters.incremental }} + {{- if ne .Values.snapshot.VolumeSnapshotClass.parameters.resourceGroup "" }} + resourceGroup: {{ .Values.snapshot.VolumeSnapshotClass.parameters.resourceGroup }} + {{- end }} + tags: {{ .Values.snapshot.VolumeSnapshotClass.parameters.tags }} +{{- with .Values.snapshot.VolumeSnapshotClass.additionalLabels }} +additionalLabels: +{{ toYaml . | indent 2 }} +{{- end }} +{{- end -}} +{{- end -}} diff --git a/charts/v1.30.0/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml b/charts/v1.30.0/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..b78940fc6 --- /dev/null +++ b/charts/v1.30.0/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml @@ -0,0 +1,199 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-provisioner-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-provisioner-binding +{{ include "azuredisk.labels" . 
| indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-attacher-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-attacher-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-snapshotter-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-snapshotter-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-resizer-role +{{ include "azuredisk.labels" . 
| indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-resizer-role +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-controller-secret-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.30.0/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml b/charts/v1.30.0/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml new file mode 100644 index 000000000..ae7e080af --- /dev/null +++ b/charts/v1.30.0/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml @@ -0,0 +1,28 @@ +{{- if .Values.rbac.create -}} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-secret-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-node-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.30.0/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml b/charts/v1.30.0/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml new file mode 100644 index 000000000..8e4278b6f --- /dev/null +++ b/charts/v1.30.0/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml @@ -0,0 +1,72 @@ +{{- if and .Values.snapshot.enabled .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", 
"create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-role +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-leaderelection-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.30.0/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml b/charts/v1.30.0/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..1806e9c58 --- /dev/null +++ b/charts/v1.30.0/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v1.30.0/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml b/charts/v1.30.0/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml new file mode 100644 index 000000000..dcc2ff60b --- /dev/null +++ b/charts/v1.30.0/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . 
| indent 2 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v1.30.0/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml b/charts/v1.30.0/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml new file mode 100644 index 000000000..7cdaad0b4 --- /dev/null +++ b/charts/v1.30.0/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml @@ -0,0 +1,7 @@ +{{- if and .Values.snapshot.enabled .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/v1.30.0/azuredisk-csi-driver/values.yaml b/charts/v1.30.0/azuredisk-csi-driver/values.yaml new file mode 100644 index 000000000..6c40c8043 --- /dev/null +++ b/charts/v1.30.0/azuredisk-csi-driver/values.yaml @@ -0,0 +1,289 @@ +image: + baseRepo: mcr.microsoft.com + azuredisk: + repository: /oss/kubernetes-csi/azuredisk-csi + tag: v1.30.0 + pullPolicy: IfNotPresent + csiProvisioner: + repository: /oss/kubernetes-csi/csi-provisioner + tag: v4.0.0 + pullPolicy: IfNotPresent + csiAttacher: + repository: /oss/kubernetes-csi/csi-attacher + tag: v4.5.0 + pullPolicy: IfNotPresent + csiResizer: + repository: /oss/kubernetes-csi/csi-resizer + tag: v1.9.3 + pullPolicy: IfNotPresent + livenessProbe: + repository: /oss/kubernetes-csi/livenessprobe + tag: v2.12.0 + pullPolicy: IfNotPresent + nodeDriverRegistrar: + repository: /oss/kubernetes-csi/csi-node-driver-registrar + tag: v2.10.0 + pullPolicy: IfNotPresent + +serviceAccount: + create: true # When true, service accounts will be created for you. Set to false if you want to use your own. 
+ controller: csi-azuredisk-controller-sa # Name of Service Account to be created or used + node: csi-azuredisk-node-sa # Name of Service Account to be created or used + snapshotController: csi-snapshot-controller-sa # Name of Service Account to be created or used + +rbac: + create: true + name: azuredisk + +controller: + name: csi-azuredisk-controller + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + allowEmptyCloudConfig: false + enableTrafficManager: false + trafficManagerPort: 7788 + replicas: 2 + metricsPort: 29604 + livenessProbe: + healthPort: 29602 + runOnMaster: false + runOnControlPlane: false + disableAvailabilitySetNodes: false + vmType: "" + provisionerWorkerThreads: 100 + attacherWorkerThreads: 1000 + vmssCacheTTLInSeconds: -1 + logLevel: 5 + otelTracing: + enabled: false + otelServiceName: csi-azuredisk-controller + otelExporterEndpoint: "http://localhost:4317" + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + hostNetwork: true # this setting could be disabled if controller does not depend on MSI setting + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + resources: + csiProvisioner: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiAttacher: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiResizer: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiSnapshotter: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + azuredisk: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + +node: + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + supportZone: true + allowEmptyCloudConfig: true + getNodeIDFromIMDS: false + maxUnavailable: 1 + logLevel: 5 + livenessProbe: + healthPort: 29603 + +snapshot: + enabled: false + name: csi-snapshot-controller + image: + csiSnapshotter: + repository: /oss/kubernetes-csi/csi-snapshotter + tag: v6.3.3 + pullPolicy: IfNotPresent + csiSnapshotController: + repository: /oss/kubernetes-csi/snapshot-controller + tag: v6.3.3 + pullPolicy: IfNotPresent + snapshotController: + name: csi-snapshot-controller + replicas: 2 + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + resources: + limits: + memory: 300Mi + requests: + cpu: 10m + memory: 20Mi + VolumeSnapshotClass: + enabled: false + name: csi-azuredisk-vsc + deletionPolicy: Delete + parameters: + incremental: '"true"' # available values: "true", "false" ("true" by default for Azure Public Cloud, and "false" by default for Azure Stack Cloud) + resourceGroup: "" # available values: EXISTING RESOURCE GROUP (If not specified, snapshot will be stored in the same resource group as source Azure disk) + tags: "" # tag format: 'key1=val1,key2=val2' + additionalLabels: {} + +feature: + enableFSGroupPolicy: true + +driver: + name: disk.csi.azure.com + # maximum number of attachable volumes per node, + # maximum number is defined according to node instance type by default(-1) + volumeAttachLimit: -1 + customUserAgent: "" + userAgentSuffix: "OSS-helm" + azureGoSDKLogLevel: "" # available values: ""(no logs), DEBUG, INFO, WARNING, ERROR + httpsProxy: "" + httpProxy: "" + 
noProxy: "" + +linux: + enabled: true + dsName: csi-azuredisk-node # daemonset name + kubelet: /var/lib/kubelet + distro: debian # available values: debian, fedora + enablePerfOptimization: true + enableRegistrationProbe: true + otelTracing: + enabled: false + otelServiceName: csi-azuredisk-node + otelExporterEndpoint: "http://localhost:4317" + tolerations: + - operator: "Exists" + hostNetwork: true # this setting could be disabled if perfProfile is `none` + getNodeInfoFromLabels: false # get node info from node labels instead of IMDS + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + resources: + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + nodeDriverRegistrar: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + azuredisk: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + +windows: + enabled: true + useHostProcessContainers: false + dsName: csi-azuredisk-node-win # daemonset name + kubelet: 'C:\var\lib\kubelet' + getNodeInfoFromLabels: false # get node info from node labels instead of IMDS + enableRegistrationProbe: true + otelTracing: + enabled: false + otelServiceName: csi-azuredisk-node-win + otelExporterEndpoint: "http://localhost:4317" + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + resources: + livenessProbe: + limits: + memory: 150Mi + requests: + cpu: 10m + memory: 40Mi + nodeDriverRegistrar: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + azuredisk: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi + +cloud: AzurePublicCloud + +## Reference to one or more secrets to be used when pulling images +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +## +imagePullSecrets: [] +# - name: "image-pull-secret" + +workloadIdentity: + clientID: "" + # [optional] If the AAD application or user-assigned managed identity is not in the same tenant as the cluster + # then set tenantID with the application or user-assigned managed identity tenant ID + tenantID: "" + +azureCredentialFileConfigMap: azure-cred-file diff --git a/charts/v1.30.1/azuredisk-csi-driver-v1.30.1.tgz b/charts/v1.30.1/azuredisk-csi-driver-v1.30.1.tgz new file mode 100644 index 000000000..b4f170736 Binary files /dev/null and b/charts/v1.30.1/azuredisk-csi-driver-v1.30.1.tgz differ diff --git a/charts/v1.30.1/azuredisk-csi-driver/Chart.yaml b/charts/v1.30.1/azuredisk-csi-driver/Chart.yaml new file mode 100644 index 000000000..d25a27c52 --- /dev/null +++ b/charts/v1.30.1/azuredisk-csi-driver/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: v1.30.1 +description: Azure disk Container Storage Interface (CSI) Storage Plugin +name: azuredisk-csi-driver +version: v1.30.1 diff --git a/charts/v1.30.1/azuredisk-csi-driver/templates/NOTES.txt b/charts/v1.30.1/azuredisk-csi-driver/templates/NOTES.txt new file mode 100644 index 000000000..c198152ef --- /dev/null +++ b/charts/v1.30.1/azuredisk-csi-driver/templates/NOTES.txt @@ -0,0 +1,5 @@ +The Azure Disk CSI 
Driver is getting deployed to your cluster. + +To check Azure Disk CSI Driver pods status, please run: + + kubectl --namespace={{ .Release.Namespace }} get pods --selector="app.kubernetes.io/name={{ .Release.Name }}" --watch diff --git a/charts/v1.30.1/azuredisk-csi-driver/templates/_helpers.tpl b/charts/v1.30.1/azuredisk-csi-driver/templates/_helpers.tpl new file mode 100644 index 000000000..51fcc8a09 --- /dev/null +++ b/charts/v1.30.1/azuredisk-csi-driver/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* Expand the name of the chart.*/}} +{{- define "azuredisk.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* labels for helm resources */}} +{{- define "azuredisk.labels" -}} +labels: + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" + app.kubernetes.io/name: "{{ template "azuredisk.name" . }}" + app.kubernetes.io/version: "{{ .Chart.AppVersion }}" + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" +{{- end -}} + +{{/* pull secrets for containers */}} +{{- define "azuredisk.pullSecrets" -}} +{{- if .Values.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- end }} +{{- end -}} \ No newline at end of file diff --git a/charts/v1.30.1/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml b/charts/v1.30.1/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml new file mode 100644 index 000000000..76df8af7e --- /dev/null +++ b/charts/v1.30.1/azuredisk-csi-driver/templates/crd-csi-snapshot.yaml @@ -0,0 +1,840 @@ +{{- if .Values.snapshot.enabled -}} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + shortNames: + - vs + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of + the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing + VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from + this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot + object intends to bind to. Please note that verification of binding actually + requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure + both are pointing at each other. 
Binding MUST be verified prior to usage of + this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying + storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object representing the volume from which + a snapshot should be created. This PVC is assumed to be in the + same namespace as the VolumeSnapshot object. This field should + be set if the snapshot does not exists, and needs to be created. + This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a + pre-existing VolumeSnapshotContent object representing an existing + volume snapshot. This field should be set if the snapshot already + exists and only needs a representation in Kubernetes. This field + is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. VolumeSnapshotClassName may be + left nil to indicate that the default SnapshotClass should be used. + A given cluster may have multiple default Volume SnapshotClasses: + one default per CSI Driver. If a VolumeSnapshot does not specify + a SnapshotClass, VolumeSnapshotSource will be checked to figure + out what the associated CSI Driver is, and the default VolumeSnapshotClass + associated with that CSI Driver will be used. If more than one VolumeSnapshotClass + exist for a given CSI Driver and more than one have been marked + as default, CreateSnapshot will fail and generate an event. Empty + string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. 
+ Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent + objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent + object to which this VolumeSnapshot object intends to bind to. If + not specified, it indicates that the VolumeSnapshot object has not + been successfully bound to a VolumeSnapshotContent object yet. NOTE: + To avoid possible security issues, consumers must verify binding + between VolumeSnapshot and VolumeSnapshotContent objects is successful + (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the snapshot controller + with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it may indicate + that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. The snapshot controller will keep retrying when an error + occurs during the snapshot creation. Upon success, this error field + will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the snapshot controller with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required + to create a volume from this snapshot. In dynamic snapshot creation + case, this field will be filled in by the snapshot controller with + the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. + For a pre-existing snapshot, this field will be filled with the + "size_bytes" value returned from the CSI "ListSnapshots" gRPC call + if the driver supports it. When restoring a volume from this snapshot, + the size of the volume MUST NOT be smaller than the restoreSize + if it is specified, otherwise the restoration will fail. If not + specified, it indicates that the size is unknown. 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. 
+ properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during the snapshot creation. Upon success, this error field will be cleared. 
+ properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + shortNames: + - vsclass + - vsclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the + VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent + created through the VolumeSnapshotClass should be deleted when its bound + VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". + "Retain" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. + Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. 
+ type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: false + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + shortNames: + - vsc + - vscs + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical + snapshot on the underlying storage system should be deleted when its bound + VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on + the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. + "Delete" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are deleted. For dynamically provisioned + snapshots, this field will automatically be filled in by the CSI + snapshotter sidecar with the "DeletionPolicy" field defined in the + corresponding VolumeSnapshotClass. For pre-existing snapshots, users + MUST specify this field when creating the VolumeSnapshotContent + object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be + the same as the name returned by the CSI GetPluginName() call for + that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) + dynamically provisioned or already exists, and just requires a Kubernetes + object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of + a pre-existing snapshot on the underlying storage system for + which a Kubernetes object representation was (or should be) + created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the + volume from which a snapshot should be dynamically taken from. + This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + sourceVolumeMode: + description: SourceVolumeMode is the mode of the volume whose snapshot + is taken. Can be either “Filesystem” or “Block”. If not specified, + it indicates the source volume's mode is unknown. This field is + immutable. This field is an alpha field. + type: string + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot + was (or will be) created. Note that after provisioning, the VolumeSnapshotClass + may be deleted or recreated with different set of values, and as + such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object + to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be + provided for binding to happen. This field is immutable after creation. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the CSI snapshotter + sidecar with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it indicates + the creation time is unknown. The format of this field is a Unix + nanoseconds time encoded as an int64. On Unix, the command `date + +%s%N` returns the current time in nanoseconds since 1970-01-01 + 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, + if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the CSI snapshotter sidecar with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. 
In dynamic snapshot creation case, this field will be + filled in by the CSI snapshotter sidecar with the "size_bytes" value + returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "size_bytes" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. When restoring a volume from this snapshot, the size of the + volume MUST NOT be smaller than the restoreSize if it is specified, + otherwise the restoration will fail. If not specified, it indicates + that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot + on the underlying storage system. If not specified, it indicates + that dynamic snapshot creation has either failed or it is still + in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. 
TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. 
When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +{{- end -}} diff --git a/charts/v1.30.1/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml b/charts/v1.30.1/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml new file mode 100644 index 000000000..45e5f2f53 --- /dev/null +++ b/charts/v1.30.1/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml @@ -0,0 +1,290 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.controller.name }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.controller.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.controller.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.controller.replicas }} + selector: + matchLabels: + app: {{ .Values.controller.name }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.controller.name }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} +{{- with .Values.controller.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + serviceAccountName: {{ .Values.serviceAccount.controller }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.controller.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + {{- if .Values.controller.runOnMaster}} + node-role.kubernetes.io/master: "" + {{- end}} + {{- if .Values.controller.runOnControlPlane}} + node-role.kubernetes.io/control-plane: "" + {{- end}} + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: csi-provisioner +{{- if hasPrefix "/" .Values.image.csiProvisioner.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- else }} + image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- end }} + args: + - "--feature-gates=Topology=true,HonorPVReclaimPolicy=true" + - "--csi-address=$(ADDRESS)" + - "--v=2" + - "--timeout=30s" + - "--leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "--worker-threads={{ .Values.controller.provisionerWorkerThreads }}" + - "--extra-create-metadata=true" + - "--strict-topology=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} + - name: csi-attacher +{{- if hasPrefix "/" .Values.image.csiAttacher.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}" +{{- else }} + image: "{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}" +{{- end }} + args: + - "-v=2" + - "-csi-address=$(ADDRESS)" + - "-timeout=1200s" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "-worker-threads={{ .Values.controller.attacherWorkerThreads }}" + - "-kube-api-qps=200" + - "-kube-api-burst=400" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiAttacher | nindent 12 }} + - name: csi-snapshotter +{{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotter.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}" +{{- else }} + image: "{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "-v=2" + - "--timeout=1200s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiSnapshotter | nindent 12 }} + - name: csi-resizer +{{- if hasPrefix "/" .Values.image.csiResizer.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- else }} + image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - '-handle-volume-inuse-error=false' + - '-feature-gates=RecoverVolumeExpansionFailure=true' + - "-timeout=240s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} + - name: liveness-probe +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - 
--csi-address=/csi/csi.sock + - --probe-timeout=3s + - --http-endpoint=localhost:{{ .Values.controller.livenessProbe.healthPort }} + - --v=2 + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }} +{{- if eq .Values.controller.enableTrafficManager true }} + - image: mcr.microsoft.com/aks/ccp/ccp-auto-thrust:master.221118.2 + imagePullPolicy: IfNotPresent + name: proxy + command: + - /ccp-auto-thrust + args: + - "--port={{ .Values.controller.trafficManagerPort }}" + ports: + - containerPort: {{ .Values.controller.trafficManagerPort }} + protocol: TCP +{{- end }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.controller.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}" + - "--disable-avset-nodes={{ .Values.controller.disableAvailabilitySetNodes }}" + - "--vm-type={{ .Values.controller.vmType }}" + - "--drivername={{ .Values.driver.name }}" + - "--cloud-config-secret-name={{ .Values.controller.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.controller.allowEmptyCloudConfig }}" + - "--vmss-cache-ttl-seconds={{ .Values.controller.vmssCacheTTLInSeconds }}" + - "--enable-traffic-manager={{ .Values.controller.enableTrafficManager }}" + - "--traffic-manager-port={{ .Values.controller.trafficManagerPort }}" + - "--enable-otel-tracing={{ .Values.controller.otelTracing.enabled }}" + - "--check-disk-lun-collision=true" + {{- range $value := .Values.controller.extraArgs }} + - {{ $value | quote }} + {{- end }} + ports: + - containerPort: {{ .Values.controller.metricsPort }} + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + host: localhost + path: /healthz + port: {{ .Values.controller.livenessProbe.healthPort }} + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: {{ .Values.azureCredentialFileConfigMap }} + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + {{- if .Values.controller.otelTracing.enabled }} + - name: OTEL_SERVICE_NAME + value: {{ .Values.controller.otelTracing.otelServiceName }} + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: {{ .Values.controller.otelTracing.otelExporterEndpoint }} + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir + - 
mountPath: /etc/kubernetes/ + name: azure-cred + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.controller.resources.azuredisk | nindent 12 }} + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} diff --git a/charts/v1.30.1/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml b/charts/v1.30.1/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml new file mode 100644 index 000000000..21f941748 --- /dev/null +++ b/charts/v1.30.1/azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml @@ -0,0 +1,13 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: {{ .Values.driver.name }} + annotations: + csiDriver: "{{ .Values.image.azuredisk.tag }}" + snapshot: "{{ .Values.snapshot.image.csiSnapshotter.tag }}" +spec: + attachRequired: true + podInfoOnMount: false + {{- if .Values.feature.enableFSGroupPolicy}} + fsGroupPolicy: File + {{- end}} diff --git a/charts/v1.30.1/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml b/charts/v1.30.1/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml new file mode 100644 index 000000000..73513e0d5 --- /dev/null +++ b/charts/v1.30.1/azuredisk-csi-driver/templates/csi-azuredisk-node-windows-hostprocess.yaml @@ -0,0 +1,161 @@ +{{- if and (.Values.windows.enabled) (eq .Values.windows.useHostProcessContainers true) }} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.windows.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.windows.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.windows.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.windows.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.windows.dsName }} +{{- with .Values.windows.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.windows.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.node }} +{{- with .Values.windows.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + nodeSelector: + kubernetes.io/os: windows +{{- with .Values.windows.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.windows.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.windows.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + securityContext: + seccompProfile: + type: RuntimeDefault + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + initContainers: + - name: init +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + command: + - "powershell.exe" + - "-c" + - "New-Item -ItemType Directory -Path C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\ -Force" + containers: + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + command: + - "csi-node-driver-registrar.exe" + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--plugin-registration-path=$(PLUGIN_REG_DIR)" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://{{ .Values.windows.kubelet }}\plugins\{{ .Values.driver.name }}\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + - name: PLUGIN_REG_DIR + value: C:\\var\\lib\\kubelet\\plugins_registry\\ + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }} + resources: {{- toYaml .Values.windows.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}-windows-hp" +{{- end }} + command: + - "azurediskplugin.exe" + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--enable-windows-host-process=true" + - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" + - "--enable-otel-tracing={{ .Values.windows.otelTracing.enabled }}" + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: {{ .Values.azureCredentialFileConfigMap }} + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne 
.Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: C:\k\azurestackcloud.json + {{- end }} + {{- if .Values.windows.otelTracing.enabled }} + - name: OTEL_SERVICE_NAME + value: {{ .Values.windows.otelTracing.otelServiceName }} + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: {{ .Values.windows.otelTracing.otelExporterEndpoint }} + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} +{{- end -}} diff --git a/charts/v1.30.1/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml b/charts/v1.30.1/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml new file mode 100644 index 000000000..cb0e2c7cf --- /dev/null +++ b/charts/v1.30.1/azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml @@ -0,0 +1,245 @@ +{{- if and (.Values.windows.enabled) (ne .Values.windows.useHostProcessContainers true) }} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.windows.dsName }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.windows.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.windows.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.windows.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.windows.dsName }} +{{- with .Values.windows.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.windows.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.node }} +{{- with .Values.windows.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + nodeSelector: + kubernetes.io/os: windows +{{- with .Values.windows.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.windows.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.windows.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + {{- include "azuredisk.pullSecrets" . 
| indent 6 }} + containers: + - name: liveness-probe + volumeMounts: + - mountPath: C:\csi + name: plugin-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port={{ .Values.node.livenessProbe.healthPort }}" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} + resources: {{- toYaml .Values.windows.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" +{{- if .Values.windows.enableRegistrationProbe }} + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 60 + timeoutSeconds: 30 +{{- end }} + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }} + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: registration-dir + mountPath: C:\registration + resources: {{- toYaml .Values.windows.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- end }} + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--get-node-info-from-labels={{ .Values.windows.getNodeInfoFromLabels }}" + - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" + - "--enable-otel-tracing={{ .Values.windows.otelTracing.enabled }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + 
valueFrom: + configMapKeyRef: + name: {{ .Values.azureCredentialFileConfigMap }} + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: C:\k\azurestackcloud.json + {{- end }} + {{- if .Values.windows.otelTracing.enabled }} + - name: OTEL_SERVICE_NAME + value: {{ .Values.windows.otelTracing.otelServiceName }} + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: {{ .Values.windows.otelTracing.otelExporterEndpoint }} + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: azure-config + mountPath: C:\k + - name: csi-proxy-fs-pipe-v1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + mountPath: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + mountPath: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-volume-v1beta2 + resources: {{- toYaml .Values.windows.resources.azuredisk | nindent 12 }} + volumes: + - name: csi-proxy-fs-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1beta2 + - name: registration-dir + hostPath: + path: {{ .Values.windows.kubelet }}\plugins_registry\ + type: Directory + - name: kubelet-dir + hostPath: + path: {{ .Values.windows.kubelet }}\ + type: Directory + - name: plugin-dir + hostPath: + path: {{ .Values.windows.kubelet }}\plugins\{{ .Values.driver.name }}\ + type: DirectoryOrCreate + - name: azure-config + hostPath: + path: C:\k + type: DirectoryOrCreate +{{- end -}} diff --git a/charts/v1.30.1/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml b/charts/v1.30.1/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml new file mode 100644 index 000000000..dea8d3e55 --- /dev/null +++ b/charts/v1.30.1/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml @@ -0,0 +1,254 @@ +{{- if .Values.linux.enabled}} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.linux.dsName }} + namespace: {{ .Release.Namespace }} +{{ 
include "azuredisk.labels" . | indent 2 }} +{{- with .Values.linux.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.linux.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.linux.dsName }} + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.linux.dsName }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} +{{- with .Values.linux.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.linux.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.linux.hostNetwork }} + dnsPolicy: Default + serviceAccountName: {{ .Values.serviceAccount.node }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.linux.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: +{{- with .Values.linux.affinity }} +{{ toYaml . | indent 8 }} +{{- end }} + nodeAffinity: +{{ toYaml .Values.linux.nodeAffinity | indent 10 }} + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.linux.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . | indent 6 }} + containers: + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --http-endpoint=localhost:{{ .Values.node.livenessProbe.healthPort }} + - --v=2 + resources: {{- toYaml .Values.linux.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 +{{- if .Values.linux.enableRegistrationProbe }} + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 +{{- end }} + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: {{- toYaml .Values.linux.resources.nodeDriverRegistrar | nindent 12 }} + - name: azuredisk +{{- if hasPrefix "/" .Values.image.azuredisk.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- else }} + image: "{{ .Values.image.azuredisk.repository }}:{{ .Values.image.azuredisk.tag }}" +{{- 
end }} + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--enable-perf-optimization={{ .Values.linux.enablePerfOptimization }}" + - "--drivername={{ .Values.driver.name }}" + - "--volume-attach-limit={{ .Values.driver.volumeAttachLimit }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--support-zone={{ .Values.node.supportZone }}" + - "--get-node-info-from-labels={{ .Values.linux.getNodeInfoFromLabels }}" + - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" + - "--enable-otel-tracing={{ .Values.linux.otelTracing.enabled }}" + livenessProbe: + failureThreshold: 5 + httpGet: + host: localhost + path: /healthz + port: {{ .Values.node.livenessProbe.healthPort }} + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: {{ .Values.azureCredentialFileConfigMap }} + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + {{- if ne .Values.driver.noProxy "" }} + - name: NO_PROXY + value: {{ .Values.driver.noProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + {{- if .Values.linux.otelTracing.enabled }} + - name: OTEL_SERVICE_NAME + value: {{ .Values.linux.otelTracing.otelServiceName }} + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: {{ .Values.linux.otelTracing.otelExporterEndpoint }} + {{- end }} + imagePullPolicy: {{ .Values.image.azuredisk.pullPolicy }} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: {{ .Values.linux.kubelet }}/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /dev + name: device-dir + - mountPath: /sys/bus/scsi/devices + name: sys-devices-dir + - mountPath: /sys/class/ + name: sys-class + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.linux.resources.azuredisk | nindent 12 }} + volumes: + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }} + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: /dev + 
type: Directory + name: device-dir + - hostPath: + path: /sys/bus/scsi/devices + type: Directory + name: sys-devices-dir + - hostPath: + path: /sys/class/ + type: Directory + name: sys-class + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} +{{- end -}} diff --git a/charts/v1.30.1/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml b/charts/v1.30.1/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml new file mode 100644 index 000000000..431a01b15 --- /dev/null +++ b/charts/v1.30.1/azuredisk-csi-driver/templates/csi-snapshot-controller.yaml @@ -0,0 +1,94 @@ +{{- if .Values.snapshot.enabled -}} +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.snapshot.snapshotController.name }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- with .Values.snapshot.snapshotController.labels }} +{{ . | toYaml | indent 4 }} +{{- end }} +{{- with .Values.snapshot.snapshotController.annotations }} + annotations: +{{ . | toYaml | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.snapshot.snapshotController.replicas }} + selector: + matchLabels: + app: {{ .Values.snapshot.snapshotController.name }} + # the snapshot controller won't be marked as ready if the v1 CRDs are unavailable + # in #504 the snapshot-controller will exit after around 7.5 seconds if it + # can't find the v1 CRDs so this value should be greater than that + minReadySeconds: 15 + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: +{{ include "azuredisk.labels" . | indent 6 }} + app: {{ .Values.snapshot.snapshotController.name }} +{{- with .Values.snapshot.snapshotController.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.snapshot.snapshotController.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ .Values.serviceAccount.snapshotController }} + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- include "azuredisk.pullSecrets" . | indent 6 }} + containers: + - name: {{ .Values.snapshot.snapshotController.name }} +{{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotController.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.tag }}" +{{- else }} + image: "{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.tag }}" +{{- end }} + args: + - "--v=2" + - "--leader-election=true" + - "--leader-election-namespace={{ .Release.Namespace }}" + resources: {{- toYaml .Values.snapshot.snapshotController.resources | nindent 12 }} + imagePullPolicy: {{ .Values.snapshot.image.csiSnapshotController.pullPolicy }} + +--- +{{- if .Values.snapshot.VolumeSnapshotClass.enabled -}} +kind: VolumeSnapshotClass +apiVersion: snapshot.storage.k8s.io/v1 +metadata: + name: {{ .Values.snapshot.VolumeSnapshotClass.name }} + annotations: + # This is what defines this resource as a hook. 
Without this line, the + # job is considered part of the release. + "helm.sh/hook": post-install +driver: {{ .Values.driver.name }} +deletionPolicy: {{ .Values.snapshot.VolumeSnapshotClass.deletionPolicy }} +parameters: + incremental: {{ .Values.snapshot.VolumeSnapshotClass.parameters.incremental }} + {{- if ne .Values.snapshot.VolumeSnapshotClass.parameters.resourceGroup "" }} + resourceGroup: {{ .Values.snapshot.VolumeSnapshotClass.parameters.resourceGroup }} + {{- end }} + tags: {{ .Values.snapshot.VolumeSnapshotClass.parameters.tags }} +{{- with .Values.snapshot.VolumeSnapshotClass.additionalLabels }} +additionalLabels: +{{ toYaml . | indent 2 }} +{{- end }} +{{- end -}} +{{- end -}} diff --git a/charts/v1.30.1/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml b/charts/v1.30.1/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..b78940fc6 --- /dev/null +++ b/charts/v1.30.1/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml @@ -0,0 +1,199 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-provisioner-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-provisioner-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-attacher-role +{{ include "azuredisk.labels" . 
| indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-attacher-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-snapshotter-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-snapshotter-binding +{{ include "azuredisk.labels" . | indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-resizer-role +{{ include "azuredisk.labels" . | indent 2 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-resizer-role +{{ include "azuredisk.labels" . 
| indent 2 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-controller-secret-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.30.1/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml b/charts/v1.30.1/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml new file mode 100644 index 000000000..ae7e080af --- /dev/null +++ b/charts/v1.30.1/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml @@ -0,0 +1,28 @@ +{{- if .Values.rbac.create -}} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-secret-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-node-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.30.1/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml b/charts/v1.30.1/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml new file mode 100644 index 000000000..8e4278b6f --- /dev/null +++ b/charts/v1.30.1/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml @@ -0,0 +1,72 @@ +{{- if and .Values.snapshot.enabled .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-binding +subjects: + - kind: 
ServiceAccount + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-role +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-binding +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-leaderelection-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.30.1/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml b/charts/v1.30.1/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..1806e9c58 --- /dev/null +++ b/charts/v1.30.1/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . | indent 2 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v1.30.1/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml b/charts/v1.30.1/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml new file mode 100644 index 000000000..dcc2ff60b --- /dev/null +++ b/charts/v1.30.1/azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +{{ include "azuredisk.labels" . 
| indent 2 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v1.30.1/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml b/charts/v1.30.1/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml new file mode 100644 index 000000000..7cdaad0b4 --- /dev/null +++ b/charts/v1.30.1/azuredisk-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml @@ -0,0 +1,7 @@ +{{- if and .Values.snapshot.enabled .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.snapshotController }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/v1.30.1/azuredisk-csi-driver/values.yaml b/charts/v1.30.1/azuredisk-csi-driver/values.yaml new file mode 100644 index 000000000..62c02a754 --- /dev/null +++ b/charts/v1.30.1/azuredisk-csi-driver/values.yaml @@ -0,0 +1,290 @@ +image: + baseRepo: mcr.microsoft.com + azuredisk: + repository: /oss/kubernetes-csi/azuredisk-csi + tag: v1.30.1 + pullPolicy: IfNotPresent + csiProvisioner: + repository: /oss/kubernetes-csi/csi-provisioner + tag: v4.0.0 + pullPolicy: IfNotPresent + csiAttacher: + repository: /oss/kubernetes-csi/csi-attacher + tag: v4.5.0 + pullPolicy: IfNotPresent + csiResizer: + repository: /oss/kubernetes-csi/csi-resizer + tag: v1.9.3 + pullPolicy: IfNotPresent + livenessProbe: + repository: /oss/kubernetes-csi/livenessprobe + tag: v2.12.0 + pullPolicy: IfNotPresent + nodeDriverRegistrar: + repository: /oss/kubernetes-csi/csi-node-driver-registrar + tag: v2.10.0 + pullPolicy: IfNotPresent + +serviceAccount: + create: true # When true, service accounts will be created for you. Set to false if you want to use your own. 
+ controller: csi-azuredisk-controller-sa # Name of Service Account to be created or used + node: csi-azuredisk-node-sa # Name of Service Account to be created or used + snapshotController: csi-snapshot-controller-sa # Name of Service Account to be created or used + +rbac: + create: true + name: azuredisk + +controller: + name: csi-azuredisk-controller + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + allowEmptyCloudConfig: false + enableTrafficManager: false + trafficManagerPort: 7788 + replicas: 2 + metricsPort: 29604 + livenessProbe: + healthPort: 29602 + runOnMaster: false + runOnControlPlane: false + disableAvailabilitySetNodes: false + vmType: "" + provisionerWorkerThreads: 100 + attacherWorkerThreads: 1000 + vmssCacheTTLInSeconds: -1 + logLevel: 5 + extraArgs: [] + otelTracing: + enabled: false + otelServiceName: csi-azuredisk-controller + otelExporterEndpoint: "http://localhost:4317" + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + hostNetwork: true # this setting could be disabled if controller does not depend on MSI setting + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + resources: + csiProvisioner: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiAttacher: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiResizer: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + csiSnapshotter: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + azuredisk: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + +node: + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + supportZone: true + allowEmptyCloudConfig: true + getNodeIDFromIMDS: false + maxUnavailable: 1 + logLevel: 5 + livenessProbe: + healthPort: 29603 + +snapshot: + enabled: false + name: csi-snapshot-controller + image: + csiSnapshotter: + repository: /oss/kubernetes-csi/csi-snapshotter + tag: v6.3.3 + pullPolicy: IfNotPresent + csiSnapshotController: + repository: /oss/kubernetes-csi/snapshot-controller + tag: v6.3.3 + pullPolicy: IfNotPresent + snapshotController: + name: csi-snapshot-controller + replicas: 2 + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + resources: + limits: + memory: 300Mi + requests: + cpu: 10m + memory: 20Mi + VolumeSnapshotClass: + enabled: false + name: csi-azuredisk-vsc + deletionPolicy: Delete + parameters: + incremental: '"true"' # available values: "true", "false" ("true" by default for Azure Public Cloud, and "false" by default for Azure Stack Cloud) + resourceGroup: "" # available values: EXISTING RESOURCE GROUP (If not specified, snapshot will be stored in the same resource group as source Azure disk) + tags: "" # tag format: 'key1=val1,key2=val2' + additionalLabels: {} + +feature: + enableFSGroupPolicy: true + +driver: + name: disk.csi.azure.com + # maximum number of attachable volumes per node, + # maximum number is defined according to node instance type by default(-1) + volumeAttachLimit: -1 + customUserAgent: "" + userAgentSuffix: "OSS-helm" + azureGoSDKLogLevel: "" # available values: ""(no logs), DEBUG, INFO, WARNING, ERROR + httpsProxy: "" + 
httpProxy: "" + noProxy: "" + +linux: + enabled: true + dsName: csi-azuredisk-node # daemonset name + kubelet: /var/lib/kubelet + distro: debian # available values: debian, fedora + enablePerfOptimization: true + enableRegistrationProbe: true + otelTracing: + enabled: false + otelServiceName: csi-azuredisk-node + otelExporterEndpoint: "http://localhost:4317" + tolerations: + - operator: "Exists" + hostNetwork: true # this setting could be disabled if perfProfile is `none` + getNodeInfoFromLabels: false # get node info from node labels instead of IMDS + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + resources: + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + nodeDriverRegistrar: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + azuredisk: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + +windows: + enabled: true + useHostProcessContainers: false + dsName: csi-azuredisk-node-win # daemonset name + kubelet: 'C:\var\lib\kubelet' + getNodeInfoFromLabels: false # get node info from node labels instead of IMDS + enableRegistrationProbe: true + otelTracing: + enabled: false + otelServiceName: csi-azuredisk-node-win + otelExporterEndpoint: "http://localhost:4317" + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + affinity: {} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + resources: + livenessProbe: + limits: + memory: 150Mi + requests: + cpu: 10m + memory: 40Mi + nodeDriverRegistrar: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + azuredisk: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi + +cloud: AzurePublicCloud + +## Reference to one or more secrets to be used when pulling images +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +## +imagePullSecrets: [] +# - name: "image-pull-secret" + +workloadIdentity: + clientID: "" + # [optional] If the AAD application or user-assigned managed identity is not in the same tenant as the cluster + # then set tenantID with the application or user-assigned managed identity tenant ID + tenantID: "" + +azureCredentialFileConfigMap: azure-cred-file diff --git a/deploy/csi-azuredisk-controller.yaml b/deploy/csi-azuredisk-controller.yaml index ab1ac0f67..c07dc710d 100644 --- a/deploy/csi-azuredisk-controller.yaml +++ b/deploy/csi-azuredisk-controller.yaml @@ -34,7 +34,7 @@ spec: effect: "NoSchedule" containers: - name: csi-provisioner - image: mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v3.5.0 + image: mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v4.0.0 args: - "--feature-gates=Topology=true,HonorPVReclaimPolicy=true" - "--csi-address=$(ADDRESS)" @@ -60,7 +60,7 @@ spec: cpu: 10m memory: 20Mi - name: csi-attacher - image: mcr.microsoft.com/oss/kubernetes-csi/csi-attacher:v4.3.0 + image: mcr.microsoft.com/oss/kubernetes-csi/csi-attacher:v4.5.0 args: - "-v=2" - "-csi-address=$(ADDRESS)" @@ -83,13 +83,13 @@ spec: cpu: 10m memory: 20Mi - name: csi-snapshotter - image: mcr.microsoft.com/oss/kubernetes-csi/csi-snapshotter:v6.2.2 + image: 
mcr.microsoft.com/oss/kubernetes-csi/csi-snapshotter:v6.3.3 args: - "-csi-address=$(ADDRESS)" - "-leader-election" - "--leader-election-namespace=kube-system" - "--v=2" - - "--timeout=600s" + - "--timeout=1200s" env: - name: ADDRESS value: /csi/csi.sock @@ -103,7 +103,7 @@ spec: cpu: 10m memory: 20Mi - name: csi-resizer - image: mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.8.0 + image: mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.9.3 args: - "-csi-address=$(ADDRESS)" - "-v=2" @@ -125,11 +125,11 @@ spec: cpu: 10m memory: 20Mi - name: liveness-probe - image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.12.0 args: - --csi-address=/csi/csi.sock - --probe-timeout=3s - - --health-port=29602 + - --http-endpoint=localhost:29602 - --v=2 volumeMounts: - name: socket-dir @@ -141,7 +141,7 @@ spec: cpu: 10m memory: 20Mi - name: azuredisk - image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.29.0 + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.30.1 imagePullPolicy: IfNotPresent args: - "--v=5" @@ -151,17 +151,15 @@ spec: - "--disable-avset-nodes=false" - "--allow-empty-cloud-config=false" ports: - - containerPort: 29602 - name: healthz - protocol: TCP - containerPort: 29604 name: metrics protocol: TCP livenessProbe: failureThreshold: 5 httpGet: + host: localhost path: /healthz - port: healthz + port: 29602 initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 diff --git a/deploy/csi-azuredisk-driver.yaml b/deploy/csi-azuredisk-driver.yaml index 0ff59b2f2..f2f10fdaa 100644 --- a/deploy/csi-azuredisk-driver.yaml +++ b/deploy/csi-azuredisk-driver.yaml @@ -4,7 +4,7 @@ kind: CSIDriver metadata: name: disk.csi.azure.com annotations: - csiDriver: v1.29.0 + csiDriver: v1.30.0 snapshot: v6.2.1 spec: attachRequired: true diff --git a/deploy/csi-azuredisk-node-windows-hostprocess.yaml b/deploy/csi-azuredisk-node-windows-hostprocess.yaml index 36262ee8b..105103ef5 100644 --- a/deploy/csi-azuredisk-node-windows-hostprocess.yaml +++ b/deploy/csi-azuredisk-node-windows-hostprocess.yaml @@ -41,7 +41,7 @@ spec: hostNetwork: true initContainers: - name: init - image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.29.0-windows-hp + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.30.1-windows-hp imagePullPolicy: IfNotPresent command: - "powershell.exe" @@ -49,7 +49,7 @@ spec: - "New-Item -ItemType Directory -Path C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\ -Force" containers: - name: node-driver-registrar - image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0 + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.10.0 imagePullPolicy: IfNotPresent command: - "csi-node-driver-registrar.exe" @@ -76,7 +76,7 @@ spec: cpu: 30m memory: 40Mi - name: azuredisk - image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.29.0-windows-hp + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.30.1-windows-hp imagePullPolicy: IfNotPresent command: - "azurediskplugin.exe" diff --git a/deploy/csi-azuredisk-node-windows.yaml b/deploy/csi-azuredisk-node-windows.yaml index 124d7d438..f63a16160 100644 --- a/deploy/csi-azuredisk-node-windows.yaml +++ b/deploy/csi-azuredisk-node-windows.yaml @@ -42,7 +42,7 @@ spec: volumeMounts: - mountPath: C:\csi name: plugin-dir - image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.12.0 args: - "--csi-address=$(CSI_ENDPOINT)" - 
"--probe-timeout=3s" @@ -58,7 +58,7 @@ spec: cpu: 10m memory: 40Mi - name: node-driver-registrar - image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0 + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.10.0 args: - "--v=2" - "--csi-address=$(CSI_ENDPOINT)" @@ -94,7 +94,7 @@ spec: cpu: 30m memory: 40Mi - name: azuredisk - image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.29.0 + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.30.1 imagePullPolicy: IfNotPresent args: - "--v=5" diff --git a/deploy/csi-azuredisk-node.yaml b/deploy/csi-azuredisk-node.yaml index fd54cdde9..b34f09296 100644 --- a/deploy/csi-azuredisk-node.yaml +++ b/deploy/csi-azuredisk-node.yaml @@ -42,11 +42,11 @@ spec: volumeMounts: - mountPath: /csi name: socket-dir - image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.12.0 args: - --csi-address=/csi/csi.sock - --probe-timeout=3s - - --health-port=29603 + - --http-endpoint=localhost:29603 - --v=2 resources: limits: @@ -55,7 +55,7 @@ spec: cpu: 10m memory: 20Mi - name: node-driver-registrar - image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0 + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.10.0 args: - --csi-address=$(ADDRESS) - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) @@ -85,7 +85,7 @@ spec: cpu: 10m memory: 20Mi - name: azuredisk - image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.29.0 + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.30.1 imagePullPolicy: IfNotPresent args: - "--v=5" @@ -94,15 +94,12 @@ spec: - "--enable-perf-optimization=true" - "--allow-empty-cloud-config=true" - "--get-node-info-from-labels=false" - ports: - - containerPort: 29603 - name: healthz - protocol: TCP livenessProbe: failureThreshold: 5 httpGet: + host: localhost path: /healthz - port: healthz + port: 29603 initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 diff --git a/deploy/csi-snapshot-controller.yaml b/deploy/csi-snapshot-controller.yaml index 8303b1a73..4fc5c7945 100644 --- a/deploy/csi-snapshot-controller.yaml +++ b/deploy/csi-snapshot-controller.yaml @@ -45,7 +45,7 @@ spec: effect: "NoSchedule" containers: - name: csi-snapshot-controller - image: mcr.microsoft.com/oss/kubernetes-csi/snapshot-controller:v6.2.2 + image: mcr.microsoft.com/oss/kubernetes-csi/snapshot-controller:v6.3.3 args: - "--v=2" - "--leader-election=true" diff --git a/deploy/example/azure.json b/deploy/example/azure.json index e02848e3c..b555370d2 100644 --- a/deploy/example/azure.json +++ b/deploy/example/azure.json @@ -6,8 +6,8 @@ "location": "eastus2", // mandatory "aadClientId": "xxxx-xxxx-xxxx-xxxx-xxxx", // mandatory if using service principal "aadClientSecret": "xxxx-xxxx-xxxx-xxxx-xxxx", // mandatory if using service principal - "useManagedIdentityExtension": false, // set true if using managed idenitty - "userAssignedIdentityID": "", // mandatory if using managed idenitty + "useManagedIdentityExtension": false, // set true if using managed identity + "userAssignedIdentityID": "", // mandatory if using managed identity "useInstanceMetadata": true, // optional "vmType": "standard", // optional "subnetName": "k8s-subnet", // optional diff --git a/deploy/example/logs/0.2.0/csi-azuredisk-controller.log b/deploy/example/logs/0.2.0/csi-azuredisk-controller.log index cf674c535..0496e6465 100644 --- a/deploy/example/logs/0.2.0/csi-azuredisk-controller.log +++ 
b/deploy/example/logs/0.2.0/csi-azuredisk-controller.log @@ -80,8 +80,8 @@ I0708 02:07:02.908790 1 utils.go:106] GRPC call: /csi.v1.Controller/Contro I0708 02:07:02.908930 1 utils.go:107] GRPC request: volume_id:"/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" node_id:"aks-agentpool-41197296-0" volume_capability: access_mode: > volume_context: volume_context: volume_context: volume_context: I0708 02:07:02.909162 1 controllerserver.go:267] ControllerPublishVolume: called with args {VolumeId:/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d NodeId:aks-agentpool-41197296-0 VolumeCapability:mount: access_mode: Readonly:false Secrets:map[] VolumeContext:map[cachingMode:ReadOnly kind:managed skuname:Standard_LRS storage.kubernetes.io/csiProvisionerIdentity:1562551478534-8081-disk.csi.azure.com] XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} I0708 02:07:02.948540 1 controllerserver.go:304] GetDiskLun returned: . Initiating attaching volume "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" to node "aks-agentpool-41197296-0". -I0708 02:07:02.948613 1 hashed.go:47] hashedKeyMutex.LockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" -I0708 02:07:02.948626 1 hashed.go:49] hashedKeyMutex.LockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed. +I0708 02:07:02.948613 1 hashed.go:47] hashedKeyMutex.LockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" +I0708 02:07:02.948626 1 hashed.go:49] hashedKeyMutex.LockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed. I0708 02:07:03.011845 1 controllerserver.go:335] Trying to attach volume "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" lun 0 to node "aks-agentpool-41197296-0" I0708 02:07:03.011880 1 azure_controller_standard.go:77] azureDisk - update(MC_andy-virtualnode_andy-virtualnode_eastus2): vm(aks-agentpool-41197296-0) - attach disk(pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d) I0708 02:07:19.142897 1 utils.go:106] GRPC call: /csi.v1.Identity/Probe @@ -90,8 +90,8 @@ I0708 02:07:19.142945 1 utils.go:112] GRPC response: ready: I0708 02:07:38.506091 1 azure_controller_standard.go:94] azureDisk - attach disk(pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d) succeeded I0708 02:07:38.506121 1 controllerserver.go:338] Attach operation successful: volume "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" attached to node "aks-agentpool-41197296-0". 
I0708 02:07:38.506138 1 controllerserver.go:343] attach volume "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" lun 0 to node "aks-agentpool-41197296-0" successfully -I0708 02:07:38.506155 1 hashed.go:54] hashedKeyMutex.UnlockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" -I0708 02:07:38.506164 1 hashed.go:56] hashedKeyMutex.UnlockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed. +I0708 02:07:38.506155 1 hashed.go:54] hashedKeyMutex.UnlockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" +I0708 02:07:38.506164 1 hashed.go:56] hashedKeyMutex.UnlockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed. I0708 02:07:38.506174 1 utils.go:112] GRPC response: publish_context: I0708 02:07:49.141956 1 utils.go:106] GRPC call: /csi.v1.Identity/Probe I0708 02:07:49.141971 1 utils.go:107] GRPC request: @@ -109,8 +109,8 @@ I0708 02:09:39.452064 1 utils.go:106] GRPC call: /csi.v1.Controller/Contro I0708 02:09:39.452163 1 utils.go:107] GRPC request: volume_id:"/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" node_id:"aks-agentpool-41197296-0" I0708 02:09:39.452211 1 controllerserver.go:352] ControllerUnpublishVolume: called with args {VolumeId:/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d NodeId:aks-agentpool-41197296-0 Secrets:map[] XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} I0708 02:09:39.452246 1 expiration_cache.go:98] Entry InstanceMetadata: &{key:InstanceMetadata data:0xc000673cd0 lock:{state:0 sema:0}} has expired -I0708 02:09:39.457483 1 hashed.go:47] hashedKeyMutex.LockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" -I0708 02:09:39.457529 1 hashed.go:49] hashedKeyMutex.LockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed. +I0708 02:09:39.457483 1 hashed.go:47] hashedKeyMutex.LockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" +I0708 02:09:39.457529 1 hashed.go:49] hashedKeyMutex.LockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed. 
I0708 02:09:39.457618 1 controllerserver.go:381] Trying to detach volume /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d from node aks-agentpool-41197296-0 I0708 02:09:39.551551 1 azure_controller_standard.go:122] azureDisk - detach disk: name "pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" uri "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" I0708 02:09:39.551585 1 azure_controller_standard.go:142] azureDisk - update(MC_andy-virtualnode_andy-virtualnode_eastus2): vm(aks-agentpool-41197296-0) - detach disk(pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d) @@ -129,7 +129,7 @@ I0708 02:09:41.779760 1 utils.go:112] GRPC response: name:"disk.csi.azure. I0708 02:09:41.787775 1 utils.go:106] GRPC call: /csi.v1.Controller/DeleteVolume I0708 02:09:41.787786 1 utils.go:107] GRPC request: volume_id:"/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" I0708 02:09:41.787824 1 controllerserver.go:250] deleting azure disk(/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d) -E0708 02:09:42.024138 1 utils.go:110] GRPC error: compute.DisksClient#Delete: Failure sending request: StatusCode=0 -- Original Error: autorest/azure: Service returned an error. Status= Code="OperationNotAllowed" Message="Disk pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d is attached to VM /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0." +E0708 02:09:42.024138 1 utils.go:110] GRPC error: armcompute.DisksClient#Delete: Failure sending request: StatusCode=0 -- Original Error: autorest/azure: Service returned an error. Status= Code="OperationNotAllowed" Message="Disk pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d is attached to VM /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0." I0708 02:09:49.141968 1 utils.go:106] GRPC call: /csi.v1.Identity/Probe I0708 02:09:49.142137 1 utils.go:107] GRPC request: I0708 02:09:49.142212 1 utils.go:112] GRPC response: ready: @@ -148,22 +148,22 @@ I0708 02:09:57.038401 1 utils.go:112] GRPC response: name:"disk.csi.azure. I0708 02:09:57.046245 1 utils.go:106] GRPC call: /csi.v1.Controller/DeleteVolume I0708 02:09:57.046463 1 utils.go:107] GRPC request: volume_id:"/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" I0708 02:09:57.046516 1 controllerserver.go:250] deleting azure disk(/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d) -E0708 02:09:57.295441 1 utils.go:110] GRPC error: compute.DisksClient#Delete: Failure sending request: StatusCode=0 -- Original Error: autorest/azure: Service returned an error. 
Status= Code="OperationNotAllowed" Message="Disk pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d is attached to VM /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0." +E0708 02:09:57.295441 1 utils.go:110] GRPC error: armcompute.DisksClient#Delete: Failure sending request: StatusCode=0 -- Original Error: autorest/azure: Service returned an error. Status= Code="OperationNotAllowed" Message="Disk pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d is attached to VM /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0." I0708 02:10:04.901291 1 azure_controller_standard.go:161] azureDisk - detach disk(pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d, /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d) succeeded I0708 02:10:04.901326 1 controllerserver.go:389] detach volume /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d from node aks-agentpool-41197296-0 successfully -I0708 02:10:04.901334 1 hashed.go:54] hashedKeyMutex.UnlockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" -I0708 02:10:04.901346 1 hashed.go:56] hashedKeyMutex.UnlockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed. +I0708 02:10:04.901334 1 hashed.go:54] hashedKeyMutex.UnlockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" +I0708 02:10:04.901346 1 hashed.go:56] hashedKeyMutex.UnlockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed. I0708 02:10:04.901356 1 utils.go:112] GRPC response: I0708 02:10:04.917031 1 utils.go:106] GRPC call: /csi.v1.Controller/ControllerUnpublishVolume I0708 02:10:04.917044 1 utils.go:107] GRPC request: volume_id:"/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d" node_id:"aks-agentpool-41197296-0" I0708 02:10:04.917083 1 controllerserver.go:352] ControllerUnpublishVolume: called with args {VolumeId:/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d NodeId:aks-agentpool-41197296-0 Secrets:map[] XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} -I0708 02:10:04.917163 1 hashed.go:47] hashedKeyMutex.LockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" -I0708 02:10:04.917176 1 hashed.go:49] hashedKeyMutex.LockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed. 
+I0708 02:10:04.917163 1 hashed.go:47] hashedKeyMutex.LockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" +I0708 02:10:04.917176 1 hashed.go:49] hashedKeyMutex.LockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed. I0708 02:10:04.917190 1 controllerserver.go:381] Trying to detach volume /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d from node aks-agentpool-41197296-0 W0708 02:10:04.995319 1 controllerserver.go:384] volume /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d already detached from node aks-agentpool-41197296-0 I0708 02:10:04.995374 1 controllerserver.go:389] detach volume /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-fac4b8e4-a124-11e9-8c50-c6e5c33cad5d from node aks-agentpool-41197296-0 successfully -I0708 02:10:04.995381 1 hashed.go:54] hashedKeyMutex.UnlockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" -I0708 02:10:04.995395 1 hashed.go:56] hashedKeyMutex.UnlockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed. +I0708 02:10:04.995381 1 hashed.go:54] hashedKeyMutex.UnlockKey(...) called for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" +I0708 02:10:04.995395 1 hashed.go:56] hashedKeyMutex.UnlockKey(...) for id "/subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0" completed. I0708 02:10:04.995404 1 utils.go:112] GRPC response: I0708 02:10:19.141759 1 utils.go:106] GRPC call: /csi.v1.Identity/Probe I0708 02:10:19.141896 1 utils.go:107] GRPC request: diff --git a/deploy/example/logs/0.3.0/csi-azuredisk-controller.log b/deploy/example/logs/0.3.0/csi-azuredisk-controller.log index aa2cb48da..cd73b0fe8 100644 --- a/deploy/example/logs/0.3.0/csi-azuredisk-controller.log +++ b/deploy/example/logs/0.3.0/csi-azuredisk-controller.log @@ -142,7 +142,7 @@ I0708 09:18:06.849897 1 utils.go:119] GRPC response: name:"disk.csi.azure. I0708 09:18:06.859929 1 utils.go:112] GRPC call: /csi.v1.Controller/DeleteVolume I0708 09:18:06.859949 1 utils.go:113] GRPC request: volume_id:"/subscriptions/.../resourceGroups/mc_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-e7e22319-a160-11e9-b723-f2729dcdb631" I0708 09:18:06.859959 1 controllerserver.go:257] deleting azure disk(/subscriptions/.../resourceGroups/mc_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/disks/pvc-disk-dynamic-e7e22319-a160-11e9-b723-f2729dcdb631) -E0708 09:18:07.117134 1 utils.go:117] GRPC error: compute.DisksClient#Delete: Failure sending request: StatusCode=0 -- Original Error: autorest/azure: Service returned an error. 
Status= Code="OperationNotAllowed" Message="Disk pvc-disk-dynamic-e7e22319-a160-11e9-b723-f2729dcdb631 is attached to VM /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0." +E0708 09:18:07.117134 1 utils.go:117] GRPC error: armcompute.DisksClient#Delete: Failure sending request: StatusCode=0 -- Original Error: autorest/azure: Service returned an error. Status= Code="OperationNotAllowed" Message="Disk pvc-disk-dynamic-e7e22319-a160-11e9-b723-f2729dcdb631 is attached to VM /subscriptions/.../resourceGroups/MC_andy-virtualnode_andy-virtualnode_eastus2/providers/Microsoft.Compute/virtualMachines/aks-agentpool-41197296-0." I0708 09:18:13.587579 1 utils.go:112] GRPC call: /csi.v1.Identity/Probe I0708 09:18:13.587699 1 utils.go:113] GRPC request: I0708 09:18:13.587788 1 utils.go:119] GRPC response: ready: diff --git a/deploy/example/nginx-pod-azuredisk.yaml b/deploy/example/nginx-pod-azuredisk.yaml index 6537d35b9..cf4eed9e3 100644 --- a/deploy/example/nginx-pod-azuredisk.yaml +++ b/deploy/example/nginx-pod-azuredisk.yaml @@ -16,6 +16,7 @@ spec: volumeMounts: - name: azuredisk01 mountPath: "/mnt/azuredisk" + readOnly: false volumes: - name: azuredisk01 persistentVolumeClaim: diff --git a/deploy/example/nginx-pod-ephemeral.yaml b/deploy/example/nginx-pod-ephemeral.yaml index e7c13f18d..d372d8bd1 100644 --- a/deploy/example/nginx-pod-ephemeral.yaml +++ b/deploy/example/nginx-pod-ephemeral.yaml @@ -14,6 +14,7 @@ spec: volumeMounts: - mountPath: "/mnt/azuredisk" name: azuredisk01 + readOnly: false volumes: - name: azuredisk01 ephemeral: diff --git a/deploy/example/snapshot/README.md b/deploy/example/snapshot/README.md index dce9d3673..e7f6cfaf0 100644 --- a/deploy/example/snapshot/README.md +++ b/deploy/example/snapshot/README.md @@ -90,7 +90,7 @@ outfile ### Use snapshot feature to create a copy of a disk with a different SKU > The disk SKU change can be from LRS to ZRS, from standard to premium, or even across zones, however cross-region changes are not supported. -> For information on storage class settings with cross-zone support, please refer to [allowed-topology storage class](../storageclass-azuredisk-csi-allowed-topology.yaml) +> For information on storage class settings with cross-zone support, please refer to [allowed-topology storage class](./storageclass-azuredisk-csi-allowed-topology.yaml) - Before proceeding, ensure that the application is not writing data to the source disk. - Take a snapshot of the existing disk PVC. 
diff --git a/deploy/example/statefulset-nonroot.yaml b/deploy/example/statefulset-nonroot.yaml index a170825b6..9268a5cb1 100644 --- a/deploy/example/statefulset-nonroot.yaml +++ b/deploy/example/statefulset-nonroot.yaml @@ -30,6 +30,7 @@ spec: volumeMounts: - name: persistent-storage mountPath: /mnt/azuredisk + readOnly: false updateStrategy: type: RollingUpdate selector: diff --git a/deploy/example/statefulset-nozone.yaml b/deploy/example/statefulset-nozone.yaml index d27c08c47..0398d9dd9 100644 --- a/deploy/example/statefulset-nozone.yaml +++ b/deploy/example/statefulset-nozone.yaml @@ -39,6 +39,7 @@ spec: volumeMounts: - name: persistent-storage mountPath: /mnt/azuredisk + readOnly: false updateStrategy: type: RollingUpdate selector: diff --git a/deploy/example/statefulset.yaml b/deploy/example/statefulset.yaml index c82cf00b9..f381505c8 100644 --- a/deploy/example/statefulset.yaml +++ b/deploy/example/statefulset.yaml @@ -26,6 +26,7 @@ spec: volumeMounts: - name: persistent-storage mountPath: /mnt/azuredisk + readOnly: false updateStrategy: type: RollingUpdate selector: diff --git a/deploy/example/storageclass-azuredisk-csi-allowed-topology.yaml b/deploy/example/storageclass-azuredisk-csi-allowed-topology.yaml index a6699e5e1..62ac697f4 100644 --- a/deploy/example/storageclass-azuredisk-csi-allowed-topology.yaml +++ b/deploy/example/storageclass-azuredisk-csi-allowed-topology.yaml @@ -5,7 +5,7 @@ metadata: name: managed-csi provisioner: azuredisk.csi.confidential.cloud parameters: - skuName: StandardSSD_LRS # alias: storageaccounttype, available values: Standard_LRS, Premium_LRS, StandardSSD_LRS, UltraSSD_LRS + skuName: StandardSSD_LRS # available values: StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, Premium_ZRS, etc. reclaimPolicy: Delete allowedTopologies: - matchLabelExpressions: diff --git a/deploy/example/storageclass-azuredisk-csi.yaml b/deploy/example/storageclass-azuredisk-csi.yaml index fa4ac678a..045ddef84 100644 --- a/deploy/example/storageclass-azuredisk-csi.yaml +++ b/deploy/example/storageclass-azuredisk-csi.yaml @@ -5,7 +5,7 @@ metadata: name: managed-csi provisioner: azuredisk.csi.confidential.cloud parameters: - skuName: StandardSSD_LRS # alias: storageaccounttype, available values: Standard_LRS, Premium_LRS, StandardSSD_LRS, UltraSSD_LRS + skuName: StandardSSD_LRS # available values: StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, Premium_ZRS, etc. reclaimPolicy: Delete volumeBindingMode: Immediate allowVolumeExpansion: true diff --git a/deploy/v1.26.7/crd-csi-snapshot.yaml b/deploy/v1.26.7/crd-csi-snapshot.yaml new file mode 100644 index 000000000..18d97e6b7 --- /dev/null +++ b/deploy/v1.26.7/crd-csi-snapshot.yaml @@ -0,0 +1,659 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + shortNames: + - vs + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. 
+ jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. 
+ type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurrs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. 
For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' 
+ type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurrs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + shortNames: + - vsclass + - vsclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. 
+ jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + shortNames: + - vsc + - vscs + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. 
+ jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. 
+ properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. 
For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. 
+ jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. 
+ type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. 
Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. 
+ type: string + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] \ No newline at end of file diff --git a/deploy/v1.26.7/csi-azuredisk-controller.yaml b/deploy/v1.26.7/csi-azuredisk-controller.yaml new file mode 100644 index 000000000..e9a7876fd --- /dev/null +++ b/deploy/v1.26.7/csi-azuredisk-controller.yaml @@ -0,0 +1,191 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-azuredisk-controller + template: + metadata: + labels: + app: csi-azuredisk-controller + spec: + hostNetwork: true + serviceAccountName: csi-azuredisk-controller-sa + nodeSelector: + kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node + priorityClassName: system-cluster-critical + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + containers: + - name: csi-provisioner + image: mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v3.3.0 + args: + - "--feature-gates=Topology=true" + - "--csi-address=$(ADDRESS)" + - "--v=2" + - "--timeout=30s" + - "--leader-election" + - "--leader-election-namespace=kube-system" + - "--worker-threads=100" + - "--extra-create-metadata=true" + - "--strict-topology=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-attacher + image: mcr.microsoft.com/oss/kubernetes-csi/csi-attacher:v4.0.0 + args: + - "-v=2" + - "-csi-address=$(ADDRESS)" + - "-timeout=1200s" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - "-worker-threads=500" + - "-kube-api-qps=50" + - "-kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-snapshotter + image: mcr.microsoft.com/oss/kubernetes-csi/csi-snapshotter:v5.0.1 + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - "--v=2" + - "--timeout=600s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-resizer + image: mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.6.0 + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - '-handle-volume-inuse-error=false' + - '-feature-gates=RecoverVolumeExpansionFailure=true' + - "-timeout=240s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: liveness-probe + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.8.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29602 + - --v=2 + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 100Mi + 
requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.26.7 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:29604" + - "--user-agent-suffix=OSS-kubectl" + - "--disable-avset-nodes=false" + - "--allow-empty-cloud-config=false" + ports: + - containerPort: 29602 + name: healthz + protocol: TCP + - containerPort: 29604 + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate diff --git a/deploy/v1.26.7/csi-azuredisk-driver.yaml b/deploy/v1.26.7/csi-azuredisk-driver.yaml new file mode 100644 index 000000000..bffff876a --- /dev/null +++ b/deploy/v1.26.7/csi-azuredisk-driver.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: disk.csi.azure.com + annotations: + csiDriver: v1.26.2 + snapshot: v5.0.1 +spec: + attachRequired: true + podInfoOnMount: false + fsGroupPolicy: File diff --git a/deploy/v1.26.7/csi-azuredisk-node-windows-hostprocess.yaml b/deploy/v1.26.7/csi-azuredisk-node-windows-hostprocess.yaml new file mode 100644 index 000000000..e6ac574a0 --- /dev/null +++ b/deploy/v1.26.7/csi-azuredisk-node-windows-hostprocess.yaml @@ -0,0 +1,150 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node-win + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node-win + template: + metadata: + labels: + app: csi-azuredisk-node-win + spec: + serviceAccountName: csi-azuredisk-node-sa + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + nodeSelector: + kubernetes.io/os: windows + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + initContainers: + - name: init + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.26.7 + imagePullPolicy: IfNotPresent + command: + - "powershell.exe" + - "-c" + - "New-Item" + - "-ItemType" + - "Directory" + - "-Path" + - "C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\" + - "-Force" + containers: + - name: liveness-probe + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.8.0 + command: + - "livenessprobe.exe" + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port=29603" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + resources: + limits: + memory: 150Mi + requests: + cpu: 10m + memory: 40Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.6.2 + command: + - 
"csi-node-driver-registrar.exe" + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--plugin-registration-path=$(PLUGIN_REG_DIR)" + livenessProbe: + exec: + command: + - csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 60 + timeoutSeconds: 30 + env: + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: PLUGIN_REG_DIR + value: C:\\var\\lib\\kubelet\\plugins_registry\\ + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.26.7 + imagePullPolicy: IfNotPresent + command: + - "azurediskplugin.exe" + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--metrics-address=0.0.0.0:29605" + - "--allow-empty-cloud-config=true" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi diff --git a/deploy/v1.26.7/csi-azuredisk-node-windows.yaml b/deploy/v1.26.7/csi-azuredisk-node-windows.yaml new file mode 100644 index 000000000..08ad661dd --- /dev/null +++ b/deploy/v1.26.7/csi-azuredisk-node-windows.yaml @@ -0,0 +1,192 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node-win + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node-win + template: + metadata: + labels: + app: csi-azuredisk-node-win + spec: + serviceAccountName: csi-azuredisk-node-sa + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + nodeSelector: + kubernetes.io/os: windows + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + containers: + - name: liveness-probe + volumeMounts: + - mountPath: C:\csi + name: plugin-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.8.0 + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port=29603" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + resources: + limits: + memory: 150Mi + requests: + cpu: 10m + memory: 40Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.6.2 + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe 
+ initialDelaySeconds: 60 + timeoutSeconds: 30 + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: registration-dir + mountPath: C:\registration + resources: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.26.7 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--metrics-address=0.0.0.0:29605" + - "--allow-empty-cloud-config=true" + - "--get-node-info-from-labels=false" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: azure-config + mountPath: C:\k + - name: csi-proxy-fs-pipe-v1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + mountPath: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + mountPath: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-volume-v1beta2 + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi + volumes: + - name: csi-proxy-fs-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1beta2 + - name: registration-dir + hostPath: + path: C:\var\lib\kubelet\plugins_registry\ + type: Directory + - name: kubelet-dir + hostPath: + path: C:\var\lib\kubelet\ + type: Directory + - name: plugin-dir + hostPath: + path: C:\var\lib\kubelet\plugins\disk.csi.azure.com\ + type: DirectoryOrCreate + - name: azure-config + hostPath: + path: C:\k + type: DirectoryOrCreate diff --git a/deploy/v1.26.7/csi-azuredisk-node.yaml b/deploy/v1.26.7/csi-azuredisk-node.yaml new file mode 100644 index 000000000..066bd0049 --- /dev/null +++ b/deploy/v1.26.7/csi-azuredisk-node.yaml @@ -0,0 +1,172 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node + 
namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node + template: + metadata: + labels: + app: csi-azuredisk-node + spec: + hostNetwork: true + dnsPolicy: Default + serviceAccountName: csi-azuredisk-node-sa + nodeSelector: + kubernetes.io/os: linux + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + containers: + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.8.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29603 + - --v=2 + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.6.2 + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/disk.csi.azure.com/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.26.7 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--metrics-address=0.0.0.0:29605" + - "--enable-perf-optimization=true" + - "--allow-empty-cloud-config=true" + - "--get-node-info-from-labels=false" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /dev + name: device-dir + - mountPath: /sys/bus/scsi/devices + name: sys-devices-dir + - mountPath: /sys/class/ + name: sys-class + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/disk.csi.azure.com + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: /dev + type: Directory + name: device-dir + - hostPath: + path: /sys/bus/scsi/devices + type: Directory + name: 
sys-devices-dir + - hostPath: + path: /sys/class/ + type: Directory + name: sys-class +--- diff --git a/deploy/v1.26.7/csi-snapshot-controller.yaml b/deploy/v1.26.7/csi-snapshot-controller.yaml new file mode 100644 index 000000000..79c7483bb --- /dev/null +++ b/deploy/v1.26.7/csi-snapshot-controller.yaml @@ -0,0 +1,46 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-snapshot-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-snapshot-controller + template: + metadata: + labels: + app: csi-snapshot-controller + spec: + serviceAccountName: csi-snapshot-controller-sa + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Equal" + value: "true" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Equal" + value: "true" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Equal" + value: "true" + effect: "NoSchedule" + containers: + - name: csi-snapshot-controller + image: mcr.microsoft.com/oss/kubernetes-csi/snapshot-controller:v5.0.1 + args: + - "--v=2" + - "--leader-election=true" + - "--leader-election-namespace=kube-system" + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi diff --git a/deploy/v1.26.7/rbac-csi-azuredisk-controller.yaml b/deploy/v1.26.7/rbac-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..0e59ad15b --- /dev/null +++ b/deploy/v1.26.7/rbac-csi-azuredisk-controller.yaml @@ -0,0 +1,196 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-controller-sa + namespace: kube-system +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: 
["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-attacher-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-snapshotter-role +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-snapshotter-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-resizer-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-resizer-role +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-controller-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.26.7/rbac-csi-azuredisk-node.yaml 
b/deploy/v1.26.7/rbac-csi-azuredisk-node.yaml new file mode 100644 index 000000000..c77181ee1 --- /dev/null +++ b/deploy/v1.26.7/rbac-csi-azuredisk-node.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-node-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-node-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.26.7/rbac-csi-snapshot-controller.yaml b/deploy/v1.26.7/rbac-csi-snapshot-controller.yaml new file mode 100644 index 000000000..03af76542 --- /dev/null +++ b/deploy/v1.26.7/rbac-csi-snapshot-controller.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-snapshot-controller-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-binding +subjects: + - kind: ServiceAccount + name: csi-snapshot-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-role +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-binding +subjects: + - kind: ServiceAccount + name: csi-snapshot-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-leaderelection-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.26.8/crd-csi-snapshot.yaml b/deploy/v1.26.8/crd-csi-snapshot.yaml new file mode 100644 index 000000000..18d97e6b7 --- /dev/null +++ b/deploy/v1.26.8/crd-csi-snapshot.yaml @@ -0,0 +1,659 @@ +--- +apiVersion: 
apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + shortNames: + - vs + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. 
+ properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurrs during the snapshot creation. 
Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. 
+ jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. 
Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurrs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + shortNames: + - vsclass + - vsclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. 
+ jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + shortNames: + - vsc + - vscs + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. 
+ jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. 
+ properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. 
For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. 
+ jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. 
+ type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. 
Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. 
+ type: string + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] \ No newline at end of file diff --git a/deploy/v1.26.8/csi-azuredisk-controller.yaml b/deploy/v1.26.8/csi-azuredisk-controller.yaml new file mode 100644 index 000000000..500492521 --- /dev/null +++ b/deploy/v1.26.8/csi-azuredisk-controller.yaml @@ -0,0 +1,191 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-azuredisk-controller + template: + metadata: + labels: + app: csi-azuredisk-controller + spec: + hostNetwork: true + serviceAccountName: csi-azuredisk-controller-sa + nodeSelector: + kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node + priorityClassName: system-cluster-critical + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + containers: + - name: csi-provisioner + image: mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v3.3.0 + args: + - "--feature-gates=Topology=true" + - "--csi-address=$(ADDRESS)" + - "--v=2" + - "--timeout=30s" + - "--leader-election" + - "--leader-election-namespace=kube-system" + - "--worker-threads=100" + - "--extra-create-metadata=true" + - "--strict-topology=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-attacher + image: mcr.microsoft.com/oss/kubernetes-csi/csi-attacher:v4.0.0 + args: + - "-v=2" + - "-csi-address=$(ADDRESS)" + - "-timeout=1200s" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - "-worker-threads=500" + - "-kube-api-qps=50" + - "-kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-snapshotter + image: mcr.microsoft.com/oss/kubernetes-csi/csi-snapshotter:v5.0.1 + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - "--v=2" + - "--timeout=600s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-resizer + image: mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.6.0 + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - '-handle-volume-inuse-error=false' + - '-feature-gates=RecoverVolumeExpansionFailure=true' + - "-timeout=240s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: liveness-probe + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.8.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29602 + - --v=2 + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 100Mi + 
requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.26.8 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:29604" + - "--user-agent-suffix=OSS-kubectl" + - "--disable-avset-nodes=false" + - "--allow-empty-cloud-config=false" + ports: + - containerPort: 29602 + name: healthz + protocol: TCP + - containerPort: 29604 + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate diff --git a/deploy/v1.26.8/csi-azuredisk-driver.yaml b/deploy/v1.26.8/csi-azuredisk-driver.yaml new file mode 100644 index 000000000..bffff876a --- /dev/null +++ b/deploy/v1.26.8/csi-azuredisk-driver.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: disk.csi.azure.com + annotations: + csiDriver: v1.26.2 + snapshot: v5.0.1 +spec: + attachRequired: true + podInfoOnMount: false + fsGroupPolicy: File diff --git a/deploy/v1.26.8/csi-azuredisk-node-windows-hostprocess.yaml b/deploy/v1.26.8/csi-azuredisk-node-windows-hostprocess.yaml new file mode 100644 index 000000000..7e44370aa --- /dev/null +++ b/deploy/v1.26.8/csi-azuredisk-node-windows-hostprocess.yaml @@ -0,0 +1,150 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node-win + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node-win + template: + metadata: + labels: + app: csi-azuredisk-node-win + spec: + serviceAccountName: csi-azuredisk-node-sa + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + nodeSelector: + kubernetes.io/os: windows + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + initContainers: + - name: init + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.26.8 + imagePullPolicy: IfNotPresent + command: + - "powershell.exe" + - "-c" + - "New-Item" + - "-ItemType" + - "Directory" + - "-Path" + - "C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\" + - "-Force" + containers: + - name: liveness-probe + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.8.0 + command: + - "livenessprobe.exe" + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port=29603" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + resources: + limits: + memory: 150Mi + requests: + cpu: 10m + memory: 40Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.6.2 + command: + - 
"csi-node-driver-registrar.exe" + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--plugin-registration-path=$(PLUGIN_REG_DIR)" + livenessProbe: + exec: + command: + - csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 60 + timeoutSeconds: 30 + env: + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: PLUGIN_REG_DIR + value: C:\\var\\lib\\kubelet\\plugins_registry\\ + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.26.8 + imagePullPolicy: IfNotPresent + command: + - "azurediskplugin.exe" + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--metrics-address=0.0.0.0:29605" + - "--allow-empty-cloud-config=true" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi diff --git a/deploy/v1.26.8/csi-azuredisk-node-windows.yaml b/deploy/v1.26.8/csi-azuredisk-node-windows.yaml new file mode 100644 index 000000000..d33191e1d --- /dev/null +++ b/deploy/v1.26.8/csi-azuredisk-node-windows.yaml @@ -0,0 +1,192 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node-win + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node-win + template: + metadata: + labels: + app: csi-azuredisk-node-win + spec: + serviceAccountName: csi-azuredisk-node-sa + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + nodeSelector: + kubernetes.io/os: windows + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + containers: + - name: liveness-probe + volumeMounts: + - mountPath: C:\csi + name: plugin-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.8.0 + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port=29603" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + resources: + limits: + memory: 150Mi + requests: + cpu: 10m + memory: 40Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.6.2 + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe 
+ initialDelaySeconds: 60 + timeoutSeconds: 30 + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: registration-dir + mountPath: C:\registration + resources: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.26.8 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--metrics-address=0.0.0.0:29605" + - "--allow-empty-cloud-config=true" + - "--get-node-info-from-labels=false" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: azure-config + mountPath: C:\k + - name: csi-proxy-fs-pipe-v1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + mountPath: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + mountPath: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-volume-v1beta2 + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi + volumes: + - name: csi-proxy-fs-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1beta2 + - name: registration-dir + hostPath: + path: C:\var\lib\kubelet\plugins_registry\ + type: Directory + - name: kubelet-dir + hostPath: + path: C:\var\lib\kubelet\ + type: Directory + - name: plugin-dir + hostPath: + path: C:\var\lib\kubelet\plugins\disk.csi.azure.com\ + type: DirectoryOrCreate + - name: azure-config + hostPath: + path: C:\k + type: DirectoryOrCreate diff --git a/deploy/v1.26.8/csi-azuredisk-node.yaml b/deploy/v1.26.8/csi-azuredisk-node.yaml new file mode 100644 index 000000000..a1fd214e6 --- /dev/null +++ b/deploy/v1.26.8/csi-azuredisk-node.yaml @@ -0,0 +1,172 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node + 
namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node + template: + metadata: + labels: + app: csi-azuredisk-node + spec: + hostNetwork: true + dnsPolicy: Default + serviceAccountName: csi-azuredisk-node-sa + nodeSelector: + kubernetes.io/os: linux + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + containers: + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.8.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29603 + - --v=2 + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.6.2 + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/disk.csi.azure.com/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.26.8 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--metrics-address=0.0.0.0:29605" + - "--enable-perf-optimization=true" + - "--allow-empty-cloud-config=true" + - "--get-node-info-from-labels=false" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /dev + name: device-dir + - mountPath: /sys/bus/scsi/devices + name: sys-devices-dir + - mountPath: /sys/class/ + name: sys-class + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/disk.csi.azure.com + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: /dev + type: Directory + name: device-dir + - hostPath: + path: /sys/bus/scsi/devices + type: Directory + name: 
sys-devices-dir + - hostPath: + path: /sys/class/ + type: Directory + name: sys-class +--- diff --git a/deploy/v1.26.8/csi-snapshot-controller.yaml b/deploy/v1.26.8/csi-snapshot-controller.yaml new file mode 100644 index 000000000..79c7483bb --- /dev/null +++ b/deploy/v1.26.8/csi-snapshot-controller.yaml @@ -0,0 +1,46 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-snapshot-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-snapshot-controller + template: + metadata: + labels: + app: csi-snapshot-controller + spec: + serviceAccountName: csi-snapshot-controller-sa + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Equal" + value: "true" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Equal" + value: "true" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Equal" + value: "true" + effect: "NoSchedule" + containers: + - name: csi-snapshot-controller + image: mcr.microsoft.com/oss/kubernetes-csi/snapshot-controller:v5.0.1 + args: + - "--v=2" + - "--leader-election=true" + - "--leader-election-namespace=kube-system" + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi diff --git a/deploy/v1.26.8/rbac-csi-azuredisk-controller.yaml b/deploy/v1.26.8/rbac-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..0e59ad15b --- /dev/null +++ b/deploy/v1.26.8/rbac-csi-azuredisk-controller.yaml @@ -0,0 +1,196 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-controller-sa + namespace: kube-system +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: 
["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-attacher-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-snapshotter-role +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-snapshotter-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-resizer-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-resizer-role +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-controller-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.26.8/rbac-csi-azuredisk-node.yaml 
b/deploy/v1.26.8/rbac-csi-azuredisk-node.yaml new file mode 100644 index 000000000..c77181ee1 --- /dev/null +++ b/deploy/v1.26.8/rbac-csi-azuredisk-node.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-node-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-node-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.26.8/rbac-csi-snapshot-controller.yaml b/deploy/v1.26.8/rbac-csi-snapshot-controller.yaml new file mode 100644 index 000000000..03af76542 --- /dev/null +++ b/deploy/v1.26.8/rbac-csi-snapshot-controller.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-snapshot-controller-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-binding +subjects: + - kind: ServiceAccount + name: csi-snapshot-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-role +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-binding +subjects: + - kind: ServiceAccount + name: csi-snapshot-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-leaderelection-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.28.4/crd-csi-snapshot.yaml b/deploy/v1.28.4/crd-csi-snapshot.yaml new file mode 100644 index 000000000..d4b90b266 --- /dev/null +++ b/deploy/v1.28.4/crd-csi-snapshot.yaml @@ -0,0 +1,838 @@ +--- +apiVersion: 
apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + shortNames: + - vs + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of + the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing + VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from + this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot + object intends to bind to. Please note that verification of binding actually + requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure + both are pointing at each other. Binding MUST be verified prior to usage of + this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying + storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. 
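+                  # Illustrative note, not part of the upstream CRD: exactly one of the
+                  # two source fields below may be set (enforced by the oneOf rule that
+                  # follows this object). A minimal dynamic snapshot request might look
+                  # like, for example:
+                  #   source:
+                  #     persistentVolumeClaimName: my-pvc   # hypothetical PVC name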
+ properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object representing the volume from which + a snapshot should be created. This PVC is assumed to be in the + same namespace as the VolumeSnapshot object. This field should + be set if the snapshot does not exists, and needs to be created. + This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a + pre-existing VolumeSnapshotContent object representing an existing + volume snapshot. This field should be set if the snapshot already + exists and only needs a representation in Kubernetes. This field + is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. VolumeSnapshotClassName may be + left nil to indicate that the default SnapshotClass should be used. + A given cluster may have multiple default Volume SnapshotClasses: + one default per CSI Driver. If a VolumeSnapshot does not specify + a SnapshotClass, VolumeSnapshotSource will be checked to figure + out what the associated CSI Driver is, and the default VolumeSnapshotClass + associated with that CSI Driver will be used. If more than one VolumeSnapshotClass + exist for a given CSI Driver and more than one have been marked + as default, CreateSnapshot will fail and generate an event. Empty + string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. + Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent + objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent + object to which this VolumeSnapshot object intends to bind to. If + not specified, it indicates that the VolumeSnapshot object has not + been successfully bound to a VolumeSnapshotContent object yet. NOTE: + To avoid possible security issues, consumers must verify binding + between VolumeSnapshot and VolumeSnapshotContent objects is successful + (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the snapshot controller + with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it may indicate + that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. 
The snapshot controller will keep retrying when an error + occurs during the snapshot creation. Upon success, this error field + will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the snapshot controller with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required + to create a volume from this snapshot. In dynamic snapshot creation + case, this field will be filled in by the snapshot controller with + the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. + For a pre-existing snapshot, this field will be filled with the + "size_bytes" value returned from the CSI "ListSnapshots" gRPC call + if the driver supports it. When restoring a volume from this snapshot, + the size of the volume MUST NOT be smaller than the restoreSize + if it is specified, otherwise the restoration will fail. If not + specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. 
+ jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' 
+ type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. 
If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + shortNames: + - vsclass + - vsclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the + VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent + created through the VolumeSnapshotClass should be deleted when its bound + VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". + "Retain" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. + Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. 
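+            # Illustrative note, not part of the upstream CRD: parameters are driver
+            # specific and opaque to Kubernetes. For the Azure Disk driver a
+            # VolumeSnapshotClass typically sets, for example:
+            #   parameters:
+            #     incremental: "true"   # assumed/common value; check the driver docs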
+ type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. 
+ type: object + required: + - deletionPolicy + - driver + type: object + served: false + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + shortNames: + - vsc + - vscs + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical + snapshot on the underlying storage system should be deleted when its bound + VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on + the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. 
+ "Delete" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are deleted. For dynamically provisioned + snapshots, this field will automatically be filled in by the CSI + snapshotter sidecar with the "DeletionPolicy" field defined in the + corresponding VolumeSnapshotClass. For pre-existing snapshots, users + MUST specify this field when creating the VolumeSnapshotContent + object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be + the same as the name returned by the CSI GetPluginName() call for + that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) + dynamically provisioned or already exists, and just requires a Kubernetes + object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of + a pre-existing snapshot on the underlying storage system for + which a Kubernetes object representation was (or should be) + created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the + volume from which a snapshot should be dynamically taken from. + This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + sourceVolumeMode: + description: SourceVolumeMode is the mode of the volume whose snapshot + is taken. Can be either “Filesystem” or “Block”. If not specified, + it indicates the source volume's mode is unknown. This field is + immutable. This field is an alpha field. + type: string + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot + was (or will be) created. Note that after provisioning, the VolumeSnapshotClass + may be deleted or recreated with different set of values, and as + such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object + to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be + provided for binding to happen. This field is immutable after creation. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the CSI snapshotter + sidecar with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it indicates + the creation time is unknown. The format of this field is a Unix + nanoseconds time encoded as an int64. On Unix, the command `date + +%s%N` returns the current time in nanoseconds since 1970-01-01 + 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, + if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the CSI snapshotter sidecar with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be + filled in by the CSI snapshotter sidecar with the "size_bytes" value + returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "size_bytes" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. When restoring a volume from this snapshot, the size of the + volume MUST NOT be smaller than the restoreSize if it is specified, + otherwise the restoration will fail. If not specified, it indicates + that the size is unknown. 
+ format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot + on the underlying storage system. If not specified, it indicates + that dynamic snapshot creation has either failed or it is still + in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. 
"Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. 
+ type: string + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/v1.28.4/csi-azuredisk-controller.yaml b/deploy/v1.28.4/csi-azuredisk-controller.yaml new file mode 100644 index 000000000..5a70ebc86 --- /dev/null +++ b/deploy/v1.28.4/csi-azuredisk-controller.yaml @@ -0,0 +1,194 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-azuredisk-controller + template: + metadata: + labels: + app: csi-azuredisk-controller + spec: + hostNetwork: true + serviceAccountName: csi-azuredisk-controller-sa + nodeSelector: + kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + containers: + - name: csi-provisioner + image: mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v3.5.0 + args: + - "--feature-gates=Topology=true" + - "--csi-address=$(ADDRESS)" + - "--v=2" + - "--timeout=30s" + - "--leader-election" + - "--leader-election-namespace=kube-system" + - "--worker-threads=100" + - "--extra-create-metadata=true" + - "--strict-topology=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-attacher + image: mcr.microsoft.com/oss/kubernetes-csi/csi-attacher:v4.3.0 + args: + - "-v=2" + - "-csi-address=$(ADDRESS)" + - "-timeout=1200s" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - "-worker-threads=1000" + - "-kube-api-qps=200" + - "-kube-api-burst=400" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-snapshotter + image: mcr.microsoft.com/oss/kubernetes-csi/csi-snapshotter:v6.2.2 + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - "--v=2" + - "--timeout=1200s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-resizer + image: mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.8.0 + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - '-handle-volume-inuse-error=false' + - '-feature-gates=RecoverVolumeExpansionFailure=true' + - "-timeout=240s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: liveness-probe + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29602 + - --v=2 + volumeMounts: + - name: socket-dir + mountPath: /csi + 
resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.28.4 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:29604" + - "--user-agent-suffix=OSS-kubectl" + - "--disable-avset-nodes=false" + - "--allow-empty-cloud-config=false" + ports: + - containerPort: 29602 + name: healthz + protocol: TCP + - containerPort: 29604 + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate diff --git a/deploy/v1.28.4/csi-azuredisk-driver.yaml b/deploy/v1.28.4/csi-azuredisk-driver.yaml new file mode 100644 index 000000000..16d593097 --- /dev/null +++ b/deploy/v1.28.4/csi-azuredisk-driver.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: disk.csi.azure.com + annotations: + csiDriver: v1.28.0 + snapshot: v6.2.1 +spec: + attachRequired: true + podInfoOnMount: false + fsGroupPolicy: File diff --git a/deploy/v1.28.4/csi-azuredisk-node-windows-hostprocess.yaml b/deploy/v1.28.4/csi-azuredisk-node-windows-hostprocess.yaml new file mode 100644 index 000000000..29914bf30 --- /dev/null +++ b/deploy/v1.28.4/csi-azuredisk-node-windows-hostprocess.yaml @@ -0,0 +1,108 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node-win + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node-win + template: + metadata: + labels: + app: csi-azuredisk-node-win + spec: + serviceAccountName: csi-azuredisk-node-sa + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + nodeSelector: + kubernetes.io/os: windows + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + initContainers: + - name: init + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.28.4-windows-hp + imagePullPolicy: IfNotPresent + command: + - "powershell.exe" + - "-c" + - "New-Item -ItemType Directory -Path C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\ -Force" + containers: + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0 + imagePullPolicy: IfNotPresent + command: + - "csi-node-driver-registrar.exe" + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--plugin-registration-path=$(PLUGIN_REG_DIR)" + env: + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: 
C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: PLUGIN_REG_DIR + value: C:\\var\\lib\\kubelet\\plugins_registry\\ + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.28.4-windows-hp + imagePullPolicy: IfNotPresent + command: + - "azurediskplugin.exe" + args: + - --v=5 + - --endpoint=$(CSI_ENDPOINT) + - --nodeid=$(KUBE_NODE_NAME) + - --allow-empty-cloud-config=true + - --enable-windows-host-process=true + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi diff --git a/deploy/v1.28.4/csi-azuredisk-node-windows.yaml b/deploy/v1.28.4/csi-azuredisk-node-windows.yaml new file mode 100644 index 000000000..02c001077 --- /dev/null +++ b/deploy/v1.28.4/csi-azuredisk-node-windows.yaml @@ -0,0 +1,194 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node-win + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node-win + template: + metadata: + labels: + app: csi-azuredisk-node-win + spec: + serviceAccountName: csi-azuredisk-node-sa + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + nodeSelector: + kubernetes.io/os: windows + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + containers: + - name: liveness-probe + volumeMounts: + - mountPath: C:\csi + name: plugin-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port=29603" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + resources: + limits: + memory: 150Mi + requests: + cpu: 10m + memory: 40Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0 + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 60 + timeoutSeconds: 30 + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: registration-dir + mountPath: C:\registration + resources: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.28.4 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - 
"--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--allow-empty-cloud-config=true" + - "--get-node-info-from-labels=false" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: azure-config + mountPath: C:\k + - name: csi-proxy-fs-pipe-v1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + mountPath: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + mountPath: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-volume-v1beta2 + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi + volumes: + - name: csi-proxy-fs-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1beta2 + - name: registration-dir + hostPath: + path: C:\var\lib\kubelet\plugins_registry\ + type: Directory + - name: kubelet-dir + hostPath: + path: C:\var\lib\kubelet\ + type: Directory + - name: plugin-dir + hostPath: + path: C:\var\lib\kubelet\plugins\disk.csi.azure.com\ + type: DirectoryOrCreate + - name: azure-config + hostPath: + path: C:\k + type: DirectoryOrCreate diff --git a/deploy/v1.28.4/csi-azuredisk-node.yaml b/deploy/v1.28.4/csi-azuredisk-node.yaml new file mode 100644 index 000000000..1588d30e0 --- /dev/null +++ b/deploy/v1.28.4/csi-azuredisk-node.yaml @@ -0,0 +1,174 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node + template: + metadata: + labels: + app: csi-azuredisk-node + spec: + hostNetwork: true + dnsPolicy: Default + serviceAccountName: csi-azuredisk-node-sa + nodeSelector: + kubernetes.io/os: linux + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - operator: "Exists" + containers: + - name: liveness-probe + 
volumeMounts: + - mountPath: /csi + name: socket-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29603 + - --v=2 + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0 + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/disk.csi.azure.com/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.28.4 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--enable-perf-optimization=true" + - "--allow-empty-cloud-config=true" + - "--get-node-info-from-labels=false" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /dev + name: device-dir + - mountPath: /sys/bus/scsi/devices + name: sys-devices-dir + - mountPath: /sys/class/ + name: sys-class + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/disk.csi.azure.com + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: /dev + type: Directory + name: device-dir + - hostPath: + path: /sys/bus/scsi/devices + type: Directory + name: sys-devices-dir + - hostPath: + path: /sys/class/ + type: Directory + name: sys-class +--- diff --git a/deploy/v1.28.4/csi-snapshot-controller.yaml b/deploy/v1.28.4/csi-snapshot-controller.yaml new file mode 100644 index 000000000..8303b1a73 --- /dev/null +++ b/deploy/v1.28.4/csi-snapshot-controller.yaml @@ -0,0 +1,58 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-snapshot-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-snapshot-controller + # the snapshot controller won't be marked as ready if the v1 CRDs are unavailable + # in #504 the snapshot-controller will exit after around 7.5 seconds if it + # can't find the v1 CRDs so 
this value should be greater than that + minReadySeconds: 15 + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: csi-snapshot-controller + spec: + serviceAccountName: csi-snapshot-controller-sa + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Equal" + value: "true" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Equal" + value: "true" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Equal" + value: "true" + effect: "NoSchedule" + containers: + - name: csi-snapshot-controller + image: mcr.microsoft.com/oss/kubernetes-csi/snapshot-controller:v6.2.2 + args: + - "--v=2" + - "--leader-election=true" + - "--leader-election-namespace=kube-system" + resources: + limits: + memory: 300Mi + requests: + cpu: 10m + memory: 20Mi diff --git a/deploy/v1.28.4/rbac-csi-azuredisk-controller.yaml b/deploy/v1.28.4/rbac-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..0e59ad15b --- /dev/null +++ b/deploy/v1.28.4/rbac-csi-azuredisk-controller.yaml @@ -0,0 +1,196 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-controller-sa + namespace: kube-system +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", 
"watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-attacher-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-snapshotter-role +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-snapshotter-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-resizer-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-resizer-role +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-controller-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.28.4/rbac-csi-azuredisk-node.yaml b/deploy/v1.28.4/rbac-csi-azuredisk-node.yaml new file mode 100644 index 000000000..c77181ee1 --- /dev/null +++ b/deploy/v1.28.4/rbac-csi-azuredisk-node.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-node-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: 
rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-node-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.28.4/rbac-csi-snapshot-controller.yaml b/deploy/v1.28.4/rbac-csi-snapshot-controller.yaml new file mode 100644 index 000000000..03af76542 --- /dev/null +++ b/deploy/v1.28.4/rbac-csi-snapshot-controller.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-snapshot-controller-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-binding +subjects: + - kind: ServiceAccount + name: csi-snapshot-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-role +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-binding +subjects: + - kind: ServiceAccount + name: csi-snapshot-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-leaderelection-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.28.5/crd-csi-snapshot.yaml b/deploy/v1.28.5/crd-csi-snapshot.yaml new file mode 100644 index 000000000..d4b90b266 --- /dev/null +++ b/deploy/v1.28.5/crd-csi-snapshot.yaml @@ -0,0 +1,838 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + 
names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + shortNames: + - vs + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of + the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing + VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from + this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot + object intends to bind to. Please note that verification of binding actually + requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure + both are pointing at each other. Binding MUST be verified prior to usage of + this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying + storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object representing the volume from which + a snapshot should be created. This PVC is assumed to be in the + same namespace as the VolumeSnapshot object. This field should + be set if the snapshot does not exists, and needs to be created. + This field is immutable. 
+ type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a + pre-existing VolumeSnapshotContent object representing an existing + volume snapshot. This field should be set if the snapshot already + exists and only needs a representation in Kubernetes. This field + is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. VolumeSnapshotClassName may be + left nil to indicate that the default SnapshotClass should be used. + A given cluster may have multiple default Volume SnapshotClasses: + one default per CSI Driver. If a VolumeSnapshot does not specify + a SnapshotClass, VolumeSnapshotSource will be checked to figure + out what the associated CSI Driver is, and the default VolumeSnapshotClass + associated with that CSI Driver will be used. If more than one VolumeSnapshotClass + exist for a given CSI Driver and more than one have been marked + as default, CreateSnapshot will fail and generate an event. Empty + string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. + Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent + objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent + object to which this VolumeSnapshot object intends to bind to. If + not specified, it indicates that the VolumeSnapshot object has not + been successfully bound to a VolumeSnapshotContent object yet. NOTE: + To avoid possible security issues, consumers must verify binding + between VolumeSnapshot and VolumeSnapshotContent objects is successful + (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the snapshot controller + with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it may indicate + that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. The snapshot controller will keep retrying when an error + occurs during the snapshot creation. Upon success, this error field + will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' 
+ type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the snapshot controller with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required + to create a volume from this snapshot. In dynamic snapshot creation + case, this field will be filled in by the snapshot controller with + the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. + For a pre-existing snapshot, this field will be filled with the + "size_bytes" value returned from the CSI "ListSnapshots" gRPC call + if the driver supports it. When restoring a volume from this snapshot, + the size of the volume MUST NOT be smaller than the restoreSize + if it is specified, otherwise the restoration will fail. If not + specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. 
+ deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. 
If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + shortNames: + - vsclass + - vsclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the + VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent + created through the VolumeSnapshotClass should be deleted when its bound + VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". + "Retain" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. + Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. 
+ jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: false + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + shortNames: + - vsc + - vscs + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. 
+ jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical + snapshot on the underlying storage system should be deleted when its bound + VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on + the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. + "Delete" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are deleted. For dynamically provisioned + snapshots, this field will automatically be filled in by the CSI + snapshotter sidecar with the "DeletionPolicy" field defined in the + corresponding VolumeSnapshotClass. For pre-existing snapshots, users + MUST specify this field when creating the VolumeSnapshotContent + object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be + the same as the name returned by the CSI GetPluginName() call for + that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) + dynamically provisioned or already exists, and just requires a Kubernetes + object representation. 
This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of + a pre-existing snapshot on the underlying storage system for + which a Kubernetes object representation was (or should be) + created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the + volume from which a snapshot should be dynamically taken from. + This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + sourceVolumeMode: + description: SourceVolumeMode is the mode of the volume whose snapshot + is taken. Can be either “Filesystem” or “Block”. If not specified, + it indicates the source volume's mode is unknown. This field is + immutable. This field is an alpha field. + type: string + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot + was (or will be) created. Note that after provisioning, the VolumeSnapshotClass + may be deleted or recreated with different set of values, and as + such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object + to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be + provided for binding to happen. This field is immutable after creation. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the CSI snapshotter + sidecar with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it indicates + the creation time is unknown. The format of this field is a Unix + nanoseconds time encoded as an int64. On Unix, the command `date + +%s%N` returns the current time in nanoseconds since 1970-01-01 + 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, + if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the CSI snapshotter sidecar with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be + filled in by the CSI snapshotter sidecar with the "size_bytes" value + returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "size_bytes" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. When restoring a volume from this snapshot, the size of the + volume MUST NOT be smaller than the restoreSize if it is specified, + otherwise the restoration will fail. If not specified, it indicates + that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot + on the underlying storage system. If not specified, it indicates + that dynamic snapshot creation has either failed or it is still + in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. 
+ jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. 
+ enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. 
+ type: string + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/v1.28.5/csi-azuredisk-controller.yaml b/deploy/v1.28.5/csi-azuredisk-controller.yaml new file mode 100644 index 000000000..22a675de5 --- /dev/null +++ b/deploy/v1.28.5/csi-azuredisk-controller.yaml @@ -0,0 +1,194 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-azuredisk-controller + template: + metadata: + labels: + app: csi-azuredisk-controller + spec: + hostNetwork: true + serviceAccountName: csi-azuredisk-controller-sa + nodeSelector: + kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + containers: + - name: csi-provisioner + image: mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v3.5.0 + args: + - "--feature-gates=Topology=true" + - "--csi-address=$(ADDRESS)" + - "--v=2" + - "--timeout=30s" + - "--leader-election" + - "--leader-election-namespace=kube-system" + - "--worker-threads=100" + - "--extra-create-metadata=true" + - "--strict-topology=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-attacher + image: mcr.microsoft.com/oss/kubernetes-csi/csi-attacher:v4.3.0 + args: + - "-v=2" + - "-csi-address=$(ADDRESS)" + - "-timeout=1200s" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - "-worker-threads=1000" + - "-kube-api-qps=200" + - "-kube-api-burst=400" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-snapshotter + image: mcr.microsoft.com/oss/kubernetes-csi/csi-snapshotter:v6.2.2 + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - "--v=2" + - "--timeout=1200s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-resizer + image: mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.8.0 + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - '-handle-volume-inuse-error=false' + - '-feature-gates=RecoverVolumeExpansionFailure=true' + - "-timeout=240s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: liveness-probe + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29602 + - --v=2 + volumeMounts: + - name: socket-dir + mountPath: /csi + 
resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.28.5 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:29604" + - "--user-agent-suffix=OSS-kubectl" + - "--disable-avset-nodes=false" + - "--allow-empty-cloud-config=false" + ports: + - containerPort: 29602 + name: healthz + protocol: TCP + - containerPort: 29604 + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate diff --git a/deploy/v1.28.5/csi-azuredisk-driver.yaml b/deploy/v1.28.5/csi-azuredisk-driver.yaml new file mode 100644 index 000000000..16d593097 --- /dev/null +++ b/deploy/v1.28.5/csi-azuredisk-driver.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: disk.csi.azure.com + annotations: + csiDriver: v1.28.0 + snapshot: v6.2.1 +spec: + attachRequired: true + podInfoOnMount: false + fsGroupPolicy: File diff --git a/deploy/v1.28.5/csi-azuredisk-node-windows-hostprocess.yaml b/deploy/v1.28.5/csi-azuredisk-node-windows-hostprocess.yaml new file mode 100644 index 000000000..5cc46d238 --- /dev/null +++ b/deploy/v1.28.5/csi-azuredisk-node-windows-hostprocess.yaml @@ -0,0 +1,108 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node-win + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node-win + template: + metadata: + labels: + app: csi-azuredisk-node-win + spec: + serviceAccountName: csi-azuredisk-node-sa + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + nodeSelector: + kubernetes.io/os: windows + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + initContainers: + - name: init + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.28.5-windows-hp + imagePullPolicy: IfNotPresent + command: + - "powershell.exe" + - "-c" + - "New-Item -ItemType Directory -Path C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\ -Force" + containers: + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0 + imagePullPolicy: IfNotPresent + command: + - "csi-node-driver-registrar.exe" + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--plugin-registration-path=$(PLUGIN_REG_DIR)" + env: + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: 
C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: PLUGIN_REG_DIR + value: C:\\var\\lib\\kubelet\\plugins_registry\\ + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.28.5-windows-hp + imagePullPolicy: IfNotPresent + command: + - "azurediskplugin.exe" + args: + - --v=5 + - --endpoint=$(CSI_ENDPOINT) + - --nodeid=$(KUBE_NODE_NAME) + - --allow-empty-cloud-config=true + - --enable-windows-host-process=true + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi diff --git a/deploy/v1.28.5/csi-azuredisk-node-windows.yaml b/deploy/v1.28.5/csi-azuredisk-node-windows.yaml new file mode 100644 index 000000000..1422f89e1 --- /dev/null +++ b/deploy/v1.28.5/csi-azuredisk-node-windows.yaml @@ -0,0 +1,194 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node-win + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node-win + template: + metadata: + labels: + app: csi-azuredisk-node-win + spec: + serviceAccountName: csi-azuredisk-node-sa + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + nodeSelector: + kubernetes.io/os: windows + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + containers: + - name: liveness-probe + volumeMounts: + - mountPath: C:\csi + name: plugin-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port=29603" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + resources: + limits: + memory: 150Mi + requests: + cpu: 10m + memory: 40Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0 + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 60 + timeoutSeconds: 30 + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: registration-dir + mountPath: C:\registration + resources: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.28.5 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - 
"--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--allow-empty-cloud-config=true" + - "--get-node-info-from-labels=false" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: azure-config + mountPath: C:\k + - name: csi-proxy-fs-pipe-v1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + mountPath: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + mountPath: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-volume-v1beta2 + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi + volumes: + - name: csi-proxy-fs-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1beta2 + - name: registration-dir + hostPath: + path: C:\var\lib\kubelet\plugins_registry\ + type: Directory + - name: kubelet-dir + hostPath: + path: C:\var\lib\kubelet\ + type: Directory + - name: plugin-dir + hostPath: + path: C:\var\lib\kubelet\plugins\disk.csi.azure.com\ + type: DirectoryOrCreate + - name: azure-config + hostPath: + path: C:\k + type: DirectoryOrCreate diff --git a/deploy/v1.28.5/csi-azuredisk-node.yaml b/deploy/v1.28.5/csi-azuredisk-node.yaml new file mode 100644 index 000000000..3da03db22 --- /dev/null +++ b/deploy/v1.28.5/csi-azuredisk-node.yaml @@ -0,0 +1,174 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node + template: + metadata: + labels: + app: csi-azuredisk-node + spec: + hostNetwork: true + dnsPolicy: Default + serviceAccountName: csi-azuredisk-node-sa + nodeSelector: + kubernetes.io/os: linux + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - operator: "Exists" + containers: + - name: liveness-probe + 
volumeMounts: + - mountPath: /csi + name: socket-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29603 + - --v=2 + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0 + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/disk.csi.azure.com/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.28.5 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--enable-perf-optimization=true" + - "--allow-empty-cloud-config=true" + - "--get-node-info-from-labels=false" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /dev + name: device-dir + - mountPath: /sys/bus/scsi/devices + name: sys-devices-dir + - mountPath: /sys/class/ + name: sys-class + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/disk.csi.azure.com + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: /dev + type: Directory + name: device-dir + - hostPath: + path: /sys/bus/scsi/devices + type: Directory + name: sys-devices-dir + - hostPath: + path: /sys/class/ + type: Directory + name: sys-class +--- diff --git a/deploy/v1.28.5/csi-snapshot-controller.yaml b/deploy/v1.28.5/csi-snapshot-controller.yaml new file mode 100644 index 000000000..8303b1a73 --- /dev/null +++ b/deploy/v1.28.5/csi-snapshot-controller.yaml @@ -0,0 +1,58 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-snapshot-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-snapshot-controller + # the snapshot controller won't be marked as ready if the v1 CRDs are unavailable + # in #504 the snapshot-controller will exit after around 7.5 seconds if it + # can't find the v1 CRDs so 
this value should be greater than that + minReadySeconds: 15 + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: csi-snapshot-controller + spec: + serviceAccountName: csi-snapshot-controller-sa + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Equal" + value: "true" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Equal" + value: "true" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Equal" + value: "true" + effect: "NoSchedule" + containers: + - name: csi-snapshot-controller + image: mcr.microsoft.com/oss/kubernetes-csi/snapshot-controller:v6.2.2 + args: + - "--v=2" + - "--leader-election=true" + - "--leader-election-namespace=kube-system" + resources: + limits: + memory: 300Mi + requests: + cpu: 10m + memory: 20Mi diff --git a/deploy/v1.28.5/rbac-csi-azuredisk-controller.yaml b/deploy/v1.28.5/rbac-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..0e59ad15b --- /dev/null +++ b/deploy/v1.28.5/rbac-csi-azuredisk-controller.yaml @@ -0,0 +1,196 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-controller-sa + namespace: kube-system +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", 
"watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-attacher-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-snapshotter-role +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-snapshotter-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-resizer-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-resizer-role +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-controller-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.28.5/rbac-csi-azuredisk-node.yaml b/deploy/v1.28.5/rbac-csi-azuredisk-node.yaml new file mode 100644 index 000000000..c77181ee1 --- /dev/null +++ b/deploy/v1.28.5/rbac-csi-azuredisk-node.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-node-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: 
rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-node-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.28.5/rbac-csi-snapshot-controller.yaml b/deploy/v1.28.5/rbac-csi-snapshot-controller.yaml new file mode 100644 index 000000000..03af76542 --- /dev/null +++ b/deploy/v1.28.5/rbac-csi-snapshot-controller.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-snapshot-controller-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-binding +subjects: + - kind: ServiceAccount + name: csi-snapshot-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-role +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-binding +subjects: + - kind: ServiceAccount + name: csi-snapshot-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-leaderelection-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.29.1/crd-csi-snapshot.yaml b/deploy/v1.29.1/crd-csi-snapshot.yaml new file mode 100644 index 000000000..d4b90b266 --- /dev/null +++ b/deploy/v1.29.1/crd-csi-snapshot.yaml @@ -0,0 +1,838 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + 
names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + shortNames: + - vs + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of + the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing + VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from + this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot + object intends to bind to. Please note that verification of binding actually + requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure + both are pointing at each other. Binding MUST be verified prior to usage of + this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying + storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object representing the volume from which + a snapshot should be created. This PVC is assumed to be in the + same namespace as the VolumeSnapshot object. This field should + be set if the snapshot does not exists, and needs to be created. + This field is immutable. 
+ type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a + pre-existing VolumeSnapshotContent object representing an existing + volume snapshot. This field should be set if the snapshot already + exists and only needs a representation in Kubernetes. This field + is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. VolumeSnapshotClassName may be + left nil to indicate that the default SnapshotClass should be used. + A given cluster may have multiple default Volume SnapshotClasses: + one default per CSI Driver. If a VolumeSnapshot does not specify + a SnapshotClass, VolumeSnapshotSource will be checked to figure + out what the associated CSI Driver is, and the default VolumeSnapshotClass + associated with that CSI Driver will be used. If more than one VolumeSnapshotClass + exist for a given CSI Driver and more than one have been marked + as default, CreateSnapshot will fail and generate an event. Empty + string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. + Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent + objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent + object to which this VolumeSnapshot object intends to bind to. If + not specified, it indicates that the VolumeSnapshot object has not + been successfully bound to a VolumeSnapshotContent object yet. NOTE: + To avoid possible security issues, consumers must verify binding + between VolumeSnapshot and VolumeSnapshotContent objects is successful + (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the snapshot controller + with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it may indicate + that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. The snapshot controller will keep retrying when an error + occurs during the snapshot creation. Upon success, this error field + will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' 
+ type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the snapshot controller with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required + to create a volume from this snapshot. In dynamic snapshot creation + case, this field will be filled in by the snapshot controller with + the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. + For a pre-existing snapshot, this field will be filled with the + "size_bytes" value returned from the CSI "ListSnapshots" gRPC call + if the driver supports it. When restoring a volume from this snapshot, + the size of the volume MUST NOT be smaller than the restoreSize + if it is specified, otherwise the restoration will fail. If not + specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. 
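+    # In this manifest v1beta1 is additionally no longer served (served: false below), so clients
+    # must use snapshot.storage.k8s.io/v1.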
+ deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. 
If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. 
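+              # The pattern below is the serialized form of a Kubernetes resource.Quantity
+              # (for example "5Gi" or a plain byte count) and is used together with the
+              # x-kubernetes-int-or-string marker on this field.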
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + shortNames: + - vsclass + - vsclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the + VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent + created through the VolumeSnapshotClass should be deleted when its bound + VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". + "Retain" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. + Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. 
+ jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: false + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + shortNames: + - vsc + - vscs + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. 
+ jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical + snapshot on the underlying storage system should be deleted when its bound + VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on + the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. + "Delete" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are deleted. For dynamically provisioned + snapshots, this field will automatically be filled in by the CSI + snapshotter sidecar with the "DeletionPolicy" field defined in the + corresponding VolumeSnapshotClass. For pre-existing snapshots, users + MUST specify this field when creating the VolumeSnapshotContent + object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be + the same as the name returned by the CSI GetPluginName() call for + that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) + dynamically provisioned or already exists, and just requires a Kubernetes + object representation. 
This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of + a pre-existing snapshot on the underlying storage system for + which a Kubernetes object representation was (or should be) + created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the + volume from which a snapshot should be dynamically taken from. + This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + sourceVolumeMode: + description: SourceVolumeMode is the mode of the volume whose snapshot + is taken. Can be either “Filesystem” or “Block”. If not specified, + it indicates the source volume's mode is unknown. This field is + immutable. This field is an alpha field. + type: string + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot + was (or will be) created. Note that after provisioning, the VolumeSnapshotClass + may be deleted or recreated with different set of values, and as + such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object + to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be + provided for binding to happen. This field is immutable after creation. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the CSI snapshotter + sidecar with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it indicates + the creation time is unknown. The format of this field is a Unix + nanoseconds time encoded as an int64. On Unix, the command `date + +%s%N` returns the current time in nanoseconds since 1970-01-01 + 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, + if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the CSI snapshotter sidecar with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be + filled in by the CSI snapshotter sidecar with the "size_bytes" value + returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "size_bytes" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. When restoring a volume from this snapshot, the size of the + volume MUST NOT be smaller than the restoreSize if it is specified, + otherwise the restoration will fail. If not specified, it indicates + that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot + on the underlying storage system. If not specified, it indicates + that dynamic snapshot creation has either failed or it is still + in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. 
+ jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. 
+ enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. 
+ type: string + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/v1.29.1/csi-azuredisk-controller.yaml b/deploy/v1.29.1/csi-azuredisk-controller.yaml new file mode 100644 index 000000000..bcbb4b9ef --- /dev/null +++ b/deploy/v1.29.1/csi-azuredisk-controller.yaml @@ -0,0 +1,194 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-azuredisk-controller + template: + metadata: + labels: + app: csi-azuredisk-controller + spec: + hostNetwork: true + serviceAccountName: csi-azuredisk-controller-sa + nodeSelector: + kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + containers: + - name: csi-provisioner + image: mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v3.5.0 + args: + - "--feature-gates=Topology=true,HonorPVReclaimPolicy=true" + - "--csi-address=$(ADDRESS)" + - "--v=2" + - "--timeout=30s" + - "--leader-election" + - "--leader-election-namespace=kube-system" + - "--worker-threads=100" + - "--extra-create-metadata=true" + - "--strict-topology=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-attacher + image: mcr.microsoft.com/oss/kubernetes-csi/csi-attacher:v4.3.0 + args: + - "-v=2" + - "-csi-address=$(ADDRESS)" + - "-timeout=1200s" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - "-worker-threads=1000" + - "-kube-api-qps=200" + - "-kube-api-burst=400" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-snapshotter + image: mcr.microsoft.com/oss/kubernetes-csi/csi-snapshotter:v6.2.2 + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - "--v=2" + - "--timeout=1200s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-resizer + image: mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.8.0 + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - '-handle-volume-inuse-error=false' + - '-feature-gates=RecoverVolumeExpansionFailure=true' + - "-timeout=240s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: liveness-probe + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29602 + - --v=2 + volumeMounts: + - name: socket-dir + 
mountPath: /csi + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.29.1 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:29604" + - "--user-agent-suffix=OSS-kubectl" + - "--disable-avset-nodes=false" + - "--allow-empty-cloud-config=false" + ports: + - containerPort: 29602 + name: healthz + protocol: TCP + - containerPort: 29604 + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate diff --git a/deploy/v1.29.1/csi-azuredisk-driver.yaml b/deploy/v1.29.1/csi-azuredisk-driver.yaml new file mode 100644 index 000000000..0ff59b2f2 --- /dev/null +++ b/deploy/v1.29.1/csi-azuredisk-driver.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: disk.csi.azure.com + annotations: + csiDriver: v1.29.0 + snapshot: v6.2.1 +spec: + attachRequired: true + podInfoOnMount: false + fsGroupPolicy: File diff --git a/deploy/v1.29.1/csi-azuredisk-node-windows-hostprocess.yaml b/deploy/v1.29.1/csi-azuredisk-node-windows-hostprocess.yaml new file mode 100644 index 000000000..7fe606490 --- /dev/null +++ b/deploy/v1.29.1/csi-azuredisk-node-windows-hostprocess.yaml @@ -0,0 +1,108 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node-win + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node-win + template: + metadata: + labels: + app: csi-azuredisk-node-win + spec: + serviceAccountName: csi-azuredisk-node-sa + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + nodeSelector: + kubernetes.io/os: windows + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + initContainers: + - name: init + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.29.1.1-windows-hp + imagePullPolicy: IfNotPresent + command: + - "powershell.exe" + - "-c" + - "New-Item -ItemType Directory -Path C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\ -Force" + containers: + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0 + imagePullPolicy: IfNotPresent + command: + - "csi-node-driver-registrar.exe" + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--plugin-registration-path=$(PLUGIN_REG_DIR)" + env: + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: 
C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: PLUGIN_REG_DIR + value: C:\\var\\lib\\kubelet\\plugins_registry\\ + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.29.1.1-windows-hp + imagePullPolicy: IfNotPresent + command: + - "azurediskplugin.exe" + args: + - --v=5 + - --endpoint=$(CSI_ENDPOINT) + - --nodeid=$(KUBE_NODE_NAME) + - --allow-empty-cloud-config=true + - --enable-windows-host-process=true + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi diff --git a/deploy/v1.29.1/csi-azuredisk-node-windows.yaml b/deploy/v1.29.1/csi-azuredisk-node-windows.yaml new file mode 100644 index 000000000..fe977f8b3 --- /dev/null +++ b/deploy/v1.29.1/csi-azuredisk-node-windows.yaml @@ -0,0 +1,194 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node-win + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node-win + template: + metadata: + labels: + app: csi-azuredisk-node-win + spec: + serviceAccountName: csi-azuredisk-node-sa + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + nodeSelector: + kubernetes.io/os: windows + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + containers: + - name: liveness-probe + volumeMounts: + - mountPath: C:\csi + name: plugin-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port=29603" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + resources: + limits: + memory: 150Mi + requests: + cpu: 10m + memory: 40Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0 + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 60 + timeoutSeconds: 30 + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: registration-dir + mountPath: C:\registration + resources: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.29.1 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - 
"--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--allow-empty-cloud-config=true" + - "--get-node-info-from-labels=false" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: azure-config + mountPath: C:\k + - name: csi-proxy-fs-pipe-v1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + mountPath: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + mountPath: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-volume-v1beta2 + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi + volumes: + - name: csi-proxy-fs-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1beta2 + - name: registration-dir + hostPath: + path: C:\var\lib\kubelet\plugins_registry\ + type: Directory + - name: kubelet-dir + hostPath: + path: C:\var\lib\kubelet\ + type: Directory + - name: plugin-dir + hostPath: + path: C:\var\lib\kubelet\plugins\disk.csi.azure.com\ + type: DirectoryOrCreate + - name: azure-config + hostPath: + path: C:\k + type: DirectoryOrCreate diff --git a/deploy/v1.29.1/csi-azuredisk-node.yaml b/deploy/v1.29.1/csi-azuredisk-node.yaml new file mode 100644 index 000000000..3a0dac093 --- /dev/null +++ b/deploy/v1.29.1/csi-azuredisk-node.yaml @@ -0,0 +1,174 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node + template: + metadata: + labels: + app: csi-azuredisk-node + spec: + hostNetwork: true + dnsPolicy: Default + serviceAccountName: csi-azuredisk-node-sa + nodeSelector: + kubernetes.io/os: linux + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - operator: "Exists" + containers: + - name: liveness-probe + 
volumeMounts: + - mountPath: /csi + name: socket-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29603 + - --v=2 + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0 + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/disk.csi.azure.com/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.29.1 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--enable-perf-optimization=true" + - "--allow-empty-cloud-config=true" + - "--get-node-info-from-labels=false" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /dev + name: device-dir + - mountPath: /sys/bus/scsi/devices + name: sys-devices-dir + - mountPath: /sys/class/ + name: sys-class + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/disk.csi.azure.com + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: /dev + type: Directory + name: device-dir + - hostPath: + path: /sys/bus/scsi/devices + type: Directory + name: sys-devices-dir + - hostPath: + path: /sys/class/ + type: Directory + name: sys-class +--- diff --git a/deploy/v1.29.1/csi-snapshot-controller.yaml b/deploy/v1.29.1/csi-snapshot-controller.yaml new file mode 100644 index 000000000..8303b1a73 --- /dev/null +++ b/deploy/v1.29.1/csi-snapshot-controller.yaml @@ -0,0 +1,58 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-snapshot-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-snapshot-controller + # the snapshot controller won't be marked as ready if the v1 CRDs are unavailable + # in #504 the snapshot-controller will exit after around 7.5 seconds if it + # can't find the v1 CRDs so 
this value should be greater than that + minReadySeconds: 15 + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: csi-snapshot-controller + spec: + serviceAccountName: csi-snapshot-controller-sa + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Equal" + value: "true" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Equal" + value: "true" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Equal" + value: "true" + effect: "NoSchedule" + containers: + - name: csi-snapshot-controller + image: mcr.microsoft.com/oss/kubernetes-csi/snapshot-controller:v6.2.2 + args: + - "--v=2" + - "--leader-election=true" + - "--leader-election-namespace=kube-system" + resources: + limits: + memory: 300Mi + requests: + cpu: 10m + memory: 20Mi diff --git a/deploy/v1.29.1/rbac-csi-azuredisk-controller.yaml b/deploy/v1.29.1/rbac-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..0e59ad15b --- /dev/null +++ b/deploy/v1.29.1/rbac-csi-azuredisk-controller.yaml @@ -0,0 +1,196 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-controller-sa + namespace: kube-system +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", 
"watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-attacher-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-snapshotter-role +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-snapshotter-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-resizer-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-resizer-role +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-controller-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.29.1/rbac-csi-azuredisk-node.yaml b/deploy/v1.29.1/rbac-csi-azuredisk-node.yaml new file mode 100644 index 000000000..c77181ee1 --- /dev/null +++ b/deploy/v1.29.1/rbac-csi-azuredisk-node.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-node-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: 
rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-node-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.29.1/rbac-csi-snapshot-controller.yaml b/deploy/v1.29.1/rbac-csi-snapshot-controller.yaml new file mode 100644 index 000000000..03af76542 --- /dev/null +++ b/deploy/v1.29.1/rbac-csi-snapshot-controller.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-snapshot-controller-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-binding +subjects: + - kind: ServiceAccount + name: csi-snapshot-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-role +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-binding +subjects: + - kind: ServiceAccount + name: csi-snapshot-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-leaderelection-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.29.2/crd-csi-snapshot.yaml b/deploy/v1.29.2/crd-csi-snapshot.yaml new file mode 100644 index 000000000..d4b90b266 --- /dev/null +++ b/deploy/v1.29.2/crd-csi-snapshot.yaml @@ -0,0 +1,838 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + 
names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + shortNames: + - vs + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of + the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing + VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from + this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot + object intends to bind to. Please note that verification of binding actually + requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure + both are pointing at each other. Binding MUST be verified prior to usage of + this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying + storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object representing the volume from which + a snapshot should be created. This PVC is assumed to be in the + same namespace as the VolumeSnapshot object. This field should + be set if the snapshot does not exists, and needs to be created. + This field is immutable. 
+ type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a + pre-existing VolumeSnapshotContent object representing an existing + volume snapshot. This field should be set if the snapshot already + exists and only needs a representation in Kubernetes. This field + is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. VolumeSnapshotClassName may be + left nil to indicate that the default SnapshotClass should be used. + A given cluster may have multiple default Volume SnapshotClasses: + one default per CSI Driver. If a VolumeSnapshot does not specify + a SnapshotClass, VolumeSnapshotSource will be checked to figure + out what the associated CSI Driver is, and the default VolumeSnapshotClass + associated with that CSI Driver will be used. If more than one VolumeSnapshotClass + exist for a given CSI Driver and more than one have been marked + as default, CreateSnapshot will fail and generate an event. Empty + string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. + Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent + objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent + object to which this VolumeSnapshot object intends to bind to. If + not specified, it indicates that the VolumeSnapshot object has not + been successfully bound to a VolumeSnapshotContent object yet. NOTE: + To avoid possible security issues, consumers must verify binding + between VolumeSnapshot and VolumeSnapshotContent objects is successful + (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the snapshot controller + with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it may indicate + that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. The snapshot controller will keep retrying when an error + occurs during the snapshot creation. Upon success, this error field + will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' 
+ type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the snapshot controller with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required + to create a volume from this snapshot. In dynamic snapshot creation + case, this field will be filled in by the snapshot controller with + the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. + For a pre-existing snapshot, this field will be filled with the + "size_bytes" value returned from the CSI "ListSnapshots" gRPC call + if the driver supports it. When restoring a volume from this snapshot, + the size of the volume MUST NOT be smaller than the restoreSize + if it is specified, otherwise the restoration will fail. If not + specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. 
+ deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. 
If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + shortNames: + - vsclass + - vsclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the + VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent + created through the VolumeSnapshotClass should be deleted when its bound + VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". + "Retain" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. + Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. 
+ jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: false + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + shortNames: + - vsc + - vscs + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. 
+ jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical + snapshot on the underlying storage system should be deleted when its bound + VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on + the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. + "Delete" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are deleted. For dynamically provisioned + snapshots, this field will automatically be filled in by the CSI + snapshotter sidecar with the "DeletionPolicy" field defined in the + corresponding VolumeSnapshotClass. For pre-existing snapshots, users + MUST specify this field when creating the VolumeSnapshotContent + object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be + the same as the name returned by the CSI GetPluginName() call for + that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) + dynamically provisioned or already exists, and just requires a Kubernetes + object representation. 
This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of + a pre-existing snapshot on the underlying storage system for + which a Kubernetes object representation was (or should be) + created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the + volume from which a snapshot should be dynamically taken from. + This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + sourceVolumeMode: + description: SourceVolumeMode is the mode of the volume whose snapshot + is taken. Can be either “Filesystem” or “Block”. If not specified, + it indicates the source volume's mode is unknown. This field is + immutable. This field is an alpha field. + type: string + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot + was (or will be) created. Note that after provisioning, the VolumeSnapshotClass + may be deleted or recreated with different set of values, and as + such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object + to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be + provided for binding to happen. This field is immutable after creation. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the CSI snapshotter + sidecar with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it indicates + the creation time is unknown. The format of this field is a Unix + nanoseconds time encoded as an int64. On Unix, the command `date + +%s%N` returns the current time in nanoseconds since 1970-01-01 + 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, + if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the CSI snapshotter sidecar with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be + filled in by the CSI snapshotter sidecar with the "size_bytes" value + returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "size_bytes" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. When restoring a volume from this snapshot, the size of the + volume MUST NOT be smaller than the restoreSize if it is specified, + otherwise the restoration will fail. If not specified, it indicates + that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot + on the underlying storage system. If not specified, it indicates + that dynamic snapshot creation has either failed or it is still + in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. 
+ jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. 
+ enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. 
+ type: string + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/v1.29.2/csi-azuredisk-controller.yaml b/deploy/v1.29.2/csi-azuredisk-controller.yaml new file mode 100644 index 000000000..d6c03a162 --- /dev/null +++ b/deploy/v1.29.2/csi-azuredisk-controller.yaml @@ -0,0 +1,194 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-azuredisk-controller + template: + metadata: + labels: + app: csi-azuredisk-controller + spec: + hostNetwork: true + serviceAccountName: csi-azuredisk-controller-sa + nodeSelector: + kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + containers: + - name: csi-provisioner + image: mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v3.5.0 + args: + - "--feature-gates=Topology=true,HonorPVReclaimPolicy=true" + - "--csi-address=$(ADDRESS)" + - "--v=2" + - "--timeout=30s" + - "--leader-election" + - "--leader-election-namespace=kube-system" + - "--worker-threads=100" + - "--extra-create-metadata=true" + - "--strict-topology=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-attacher + image: mcr.microsoft.com/oss/kubernetes-csi/csi-attacher:v4.3.0 + args: + - "-v=2" + - "-csi-address=$(ADDRESS)" + - "-timeout=1200s" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - "-worker-threads=1000" + - "-kube-api-qps=200" + - "-kube-api-burst=400" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-snapshotter + image: mcr.microsoft.com/oss/kubernetes-csi/csi-snapshotter:v6.3.1 + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - "--v=2" + - "--timeout=1200s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-resizer + image: mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.8.0 + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - '-handle-volume-inuse-error=false' + - '-feature-gates=RecoverVolumeExpansionFailure=true' + - "-timeout=240s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: liveness-probe + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29602 + - --v=2 + volumeMounts: + - name: socket-dir + 
mountPath: /csi + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.29.2 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:29604" + - "--user-agent-suffix=OSS-kubectl" + - "--disable-avset-nodes=false" + - "--allow-empty-cloud-config=false" + ports: + - containerPort: 29602 + name: healthz + protocol: TCP + - containerPort: 29604 + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate diff --git a/deploy/v1.29.2/csi-azuredisk-driver.yaml b/deploy/v1.29.2/csi-azuredisk-driver.yaml new file mode 100644 index 000000000..0ff59b2f2 --- /dev/null +++ b/deploy/v1.29.2/csi-azuredisk-driver.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: disk.csi.azure.com + annotations: + csiDriver: v1.29.0 + snapshot: v6.2.1 +spec: + attachRequired: true + podInfoOnMount: false + fsGroupPolicy: File diff --git a/deploy/v1.29.2/csi-azuredisk-node-windows-hostprocess.yaml b/deploy/v1.29.2/csi-azuredisk-node-windows-hostprocess.yaml new file mode 100644 index 000000000..479c186fd --- /dev/null +++ b/deploy/v1.29.2/csi-azuredisk-node-windows-hostprocess.yaml @@ -0,0 +1,108 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node-win + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node-win + template: + metadata: + labels: + app: csi-azuredisk-node-win + spec: + serviceAccountName: csi-azuredisk-node-sa + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + nodeSelector: + kubernetes.io/os: windows + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + initContainers: + - name: init + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.29.2-windows-hp + imagePullPolicy: IfNotPresent + command: + - "powershell.exe" + - "-c" + - "New-Item -ItemType Directory -Path C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\ -Force" + containers: + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0 + imagePullPolicy: IfNotPresent + command: + - "csi-node-driver-registrar.exe" + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--plugin-registration-path=$(PLUGIN_REG_DIR)" + env: + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: 
C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: PLUGIN_REG_DIR + value: C:\\var\\lib\\kubelet\\plugins_registry\\ + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.29.2-windows-hp + imagePullPolicy: IfNotPresent + command: + - "azurediskplugin.exe" + args: + - --v=5 + - --endpoint=$(CSI_ENDPOINT) + - --nodeid=$(KUBE_NODE_NAME) + - --allow-empty-cloud-config=true + - --enable-windows-host-process=true + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi diff --git a/deploy/v1.29.2/csi-azuredisk-node-windows.yaml b/deploy/v1.29.2/csi-azuredisk-node-windows.yaml new file mode 100644 index 000000000..516e0faf5 --- /dev/null +++ b/deploy/v1.29.2/csi-azuredisk-node-windows.yaml @@ -0,0 +1,194 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node-win + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node-win + template: + metadata: + labels: + app: csi-azuredisk-node-win + spec: + serviceAccountName: csi-azuredisk-node-sa + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + nodeSelector: + kubernetes.io/os: windows + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + containers: + - name: liveness-probe + volumeMounts: + - mountPath: C:\csi + name: plugin-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port=29603" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + resources: + limits: + memory: 150Mi + requests: + cpu: 10m + memory: 40Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0 + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 60 + timeoutSeconds: 30 + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: registration-dir + mountPath: C:\registration + resources: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.29.2 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - 
"--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--allow-empty-cloud-config=true" + - "--get-node-info-from-labels=false" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: azure-config + mountPath: C:\k + - name: csi-proxy-fs-pipe-v1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + mountPath: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + mountPath: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-volume-v1beta2 + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi + volumes: + - name: csi-proxy-fs-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1beta2 + - name: registration-dir + hostPath: + path: C:\var\lib\kubelet\plugins_registry\ + type: Directory + - name: kubelet-dir + hostPath: + path: C:\var\lib\kubelet\ + type: Directory + - name: plugin-dir + hostPath: + path: C:\var\lib\kubelet\plugins\disk.csi.azure.com\ + type: DirectoryOrCreate + - name: azure-config + hostPath: + path: C:\k + type: DirectoryOrCreate diff --git a/deploy/v1.29.2/csi-azuredisk-node.yaml b/deploy/v1.29.2/csi-azuredisk-node.yaml new file mode 100644 index 000000000..ba2ae1d33 --- /dev/null +++ b/deploy/v1.29.2/csi-azuredisk-node.yaml @@ -0,0 +1,174 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node + template: + metadata: + labels: + app: csi-azuredisk-node + spec: + hostNetwork: true + dnsPolicy: Default + serviceAccountName: csi-azuredisk-node-sa + nodeSelector: + kubernetes.io/os: linux + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - operator: "Exists" + containers: + - name: liveness-probe + 
volumeMounts: + - mountPath: /csi + name: socket-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29603 + - --v=2 + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0 + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/disk.csi.azure.com/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.29.2 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--enable-perf-optimization=true" + - "--allow-empty-cloud-config=true" + - "--get-node-info-from-labels=false" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /dev + name: device-dir + - mountPath: /sys/bus/scsi/devices + name: sys-devices-dir + - mountPath: /sys/class/ + name: sys-class + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/disk.csi.azure.com + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: /dev + type: Directory + name: device-dir + - hostPath: + path: /sys/bus/scsi/devices + type: Directory + name: sys-devices-dir + - hostPath: + path: /sys/class/ + type: Directory + name: sys-class +--- diff --git a/deploy/v1.29.2/csi-snapshot-controller.yaml b/deploy/v1.29.2/csi-snapshot-controller.yaml new file mode 100644 index 000000000..26f361fdb --- /dev/null +++ b/deploy/v1.29.2/csi-snapshot-controller.yaml @@ -0,0 +1,58 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-snapshot-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-snapshot-controller + # the snapshot controller won't be marked as ready if the v1 CRDs are unavailable + # in #504 the snapshot-controller will exit after around 7.5 seconds if it + # can't find the v1 CRDs so 
this value should be greater than that + minReadySeconds: 15 + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: csi-snapshot-controller + spec: + serviceAccountName: csi-snapshot-controller-sa + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Equal" + value: "true" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Equal" + value: "true" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Equal" + value: "true" + effect: "NoSchedule" + containers: + - name: csi-snapshot-controller + image: mcr.microsoft.com/oss/kubernetes-csi/snapshot-controller:v6.3.1 + args: + - "--v=2" + - "--leader-election=true" + - "--leader-election-namespace=kube-system" + resources: + limits: + memory: 300Mi + requests: + cpu: 10m + memory: 20Mi diff --git a/deploy/v1.29.2/rbac-csi-azuredisk-controller.yaml b/deploy/v1.29.2/rbac-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..0e59ad15b --- /dev/null +++ b/deploy/v1.29.2/rbac-csi-azuredisk-controller.yaml @@ -0,0 +1,196 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-controller-sa + namespace: kube-system +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", 
"watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-attacher-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-snapshotter-role +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-snapshotter-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-resizer-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-resizer-role +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-controller-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.29.2/rbac-csi-azuredisk-node.yaml b/deploy/v1.29.2/rbac-csi-azuredisk-node.yaml new file mode 100644 index 000000000..c77181ee1 --- /dev/null +++ b/deploy/v1.29.2/rbac-csi-azuredisk-node.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-node-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: 
rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-node-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.29.2/rbac-csi-snapshot-controller.yaml b/deploy/v1.29.2/rbac-csi-snapshot-controller.yaml new file mode 100644 index 000000000..03af76542 --- /dev/null +++ b/deploy/v1.29.2/rbac-csi-snapshot-controller.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-snapshot-controller-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-binding +subjects: + - kind: ServiceAccount + name: csi-snapshot-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-role +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-binding +subjects: + - kind: ServiceAccount + name: csi-snapshot-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-leaderelection-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.30.0/crd-csi-snapshot.yaml b/deploy/v1.30.0/crd-csi-snapshot.yaml new file mode 100644 index 000000000..d4b90b266 --- /dev/null +++ b/deploy/v1.30.0/crd-csi-snapshot.yaml @@ -0,0 +1,838 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + 
names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + shortNames: + - vs + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of + the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing + VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from + this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot + object intends to bind to. Please note that verification of binding actually + requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure + both are pointing at each other. Binding MUST be verified prior to usage of + this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying + storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object representing the volume from which + a snapshot should be created. This PVC is assumed to be in the + same namespace as the VolumeSnapshot object. This field should + be set if the snapshot does not exists, and needs to be created. + This field is immutable. 
+ type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a + pre-existing VolumeSnapshotContent object representing an existing + volume snapshot. This field should be set if the snapshot already + exists and only needs a representation in Kubernetes. This field + is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. VolumeSnapshotClassName may be + left nil to indicate that the default SnapshotClass should be used. + A given cluster may have multiple default Volume SnapshotClasses: + one default per CSI Driver. If a VolumeSnapshot does not specify + a SnapshotClass, VolumeSnapshotSource will be checked to figure + out what the associated CSI Driver is, and the default VolumeSnapshotClass + associated with that CSI Driver will be used. If more than one VolumeSnapshotClass + exist for a given CSI Driver and more than one have been marked + as default, CreateSnapshot will fail and generate an event. Empty + string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. + Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent + objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent + object to which this VolumeSnapshot object intends to bind to. If + not specified, it indicates that the VolumeSnapshot object has not + been successfully bound to a VolumeSnapshotContent object yet. NOTE: + To avoid possible security issues, consumers must verify binding + between VolumeSnapshot and VolumeSnapshotContent objects is successful + (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the snapshot controller + with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it may indicate + that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. The snapshot controller will keep retrying when an error + occurs during the snapshot creation. Upon success, this error field + will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' 
+ type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the snapshot controller with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required + to create a volume from this snapshot. In dynamic snapshot creation + case, this field will be filled in by the snapshot controller with + the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. + For a pre-existing snapshot, this field will be filled with the + "size_bytes" value returned from the CSI "ListSnapshots" gRPC call + if the driver supports it. When restoring a volume from this snapshot, + the size of the volume MUST NOT be smaller than the restoreSize + if it is specified, otherwise the restoration will fail. If not + specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. 
+ deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. 
If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + shortNames: + - vsclass + - vsclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the + VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent + created through the VolumeSnapshotClass should be deleted when its bound + VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". + "Retain" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. + Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. 
+ jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: false + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + shortNames: + - vsc + - vscs + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. 
+ jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical + snapshot on the underlying storage system should be deleted when its bound + VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on + the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. + "Delete" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are deleted. For dynamically provisioned + snapshots, this field will automatically be filled in by the CSI + snapshotter sidecar with the "DeletionPolicy" field defined in the + corresponding VolumeSnapshotClass. For pre-existing snapshots, users + MUST specify this field when creating the VolumeSnapshotContent + object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be + the same as the name returned by the CSI GetPluginName() call for + that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) + dynamically provisioned or already exists, and just requires a Kubernetes + object representation. 
This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of + a pre-existing snapshot on the underlying storage system for + which a Kubernetes object representation was (or should be) + created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the + volume from which a snapshot should be dynamically taken from. + This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + sourceVolumeMode: + description: SourceVolumeMode is the mode of the volume whose snapshot + is taken. Can be either “Filesystem” or “Block”. If not specified, + it indicates the source volume's mode is unknown. This field is + immutable. This field is an alpha field. + type: string + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot + was (or will be) created. Note that after provisioning, the VolumeSnapshotClass + may be deleted or recreated with different set of values, and as + such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object + to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be + provided for binding to happen. This field is immutable after creation. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the CSI snapshotter + sidecar with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it indicates + the creation time is unknown. The format of this field is a Unix + nanoseconds time encoded as an int64. On Unix, the command `date + +%s%N` returns the current time in nanoseconds since 1970-01-01 + 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, + if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the CSI snapshotter sidecar with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be + filled in by the CSI snapshotter sidecar with the "size_bytes" value + returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "size_bytes" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. When restoring a volume from this snapshot, the size of the + volume MUST NOT be smaller than the restoreSize if it is specified, + otherwise the restoration will fail. If not specified, it indicates + that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot + on the underlying storage system. If not specified, it indicates + that dynamic snapshot creation has either failed or it is still + in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. 
+ jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. 
+ enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. 
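+              # Hedged, Azure-specific illustration (not part of the upstream-generated schema text):
+              # with the Azure Disk CSI driver this handle is typically the ARM resource ID of the
+              # managed snapshot, e.g.
+              # /subscriptions/<subscription-id>/resourceGroups/<rg>/providers/Microsoft.Compute/snapshots/<snapshot-name>.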
+ type: string + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/v1.30.0/csi-azuredisk-controller.yaml b/deploy/v1.30.0/csi-azuredisk-controller.yaml new file mode 100644 index 000000000..4c62d18ca --- /dev/null +++ b/deploy/v1.30.0/csi-azuredisk-controller.yaml @@ -0,0 +1,194 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-azuredisk-controller + template: + metadata: + labels: + app: csi-azuredisk-controller + spec: + hostNetwork: true + serviceAccountName: csi-azuredisk-controller-sa + nodeSelector: + kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + containers: + - name: csi-provisioner + image: mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v4.0.0 + args: + - "--feature-gates=Topology=true,HonorPVReclaimPolicy=true" + - "--csi-address=$(ADDRESS)" + - "--v=2" + - "--timeout=30s" + - "--leader-election" + - "--leader-election-namespace=kube-system" + - "--worker-threads=100" + - "--extra-create-metadata=true" + - "--strict-topology=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-attacher + image: mcr.microsoft.com/oss/kubernetes-csi/csi-attacher:v4.5.0 + args: + - "-v=2" + - "-csi-address=$(ADDRESS)" + - "-timeout=1200s" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - "-worker-threads=1000" + - "-kube-api-qps=200" + - "-kube-api-burst=400" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-snapshotter + image: mcr.microsoft.com/oss/kubernetes-csi/csi-snapshotter:v6.3.3 + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - "--v=2" + - "--timeout=1200s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-resizer + image: mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.9.3 + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - '-handle-volume-inuse-error=false' + - '-feature-gates=RecoverVolumeExpansionFailure=true' + - "-timeout=240s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: liveness-probe + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.12.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29602 + - --v=2 + volumeMounts: + - name: socket-dir + 
mountPath: /csi + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.30.0 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:29604" + - "--user-agent-suffix=OSS-kubectl" + - "--disable-avset-nodes=false" + - "--allow-empty-cloud-config=false" + ports: + - containerPort: 29602 + name: healthz + protocol: TCP + - containerPort: 29604 + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate diff --git a/deploy/v1.30.0/csi-azuredisk-driver.yaml b/deploy/v1.30.0/csi-azuredisk-driver.yaml new file mode 100644 index 000000000..f2f10fdaa --- /dev/null +++ b/deploy/v1.30.0/csi-azuredisk-driver.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: disk.csi.azure.com + annotations: + csiDriver: v1.30.0 + snapshot: v6.2.1 +spec: + attachRequired: true + podInfoOnMount: false + fsGroupPolicy: File diff --git a/deploy/v1.30.0/csi-azuredisk-node-windows-hostprocess.yaml b/deploy/v1.30.0/csi-azuredisk-node-windows-hostprocess.yaml new file mode 100644 index 000000000..3d33bcedf --- /dev/null +++ b/deploy/v1.30.0/csi-azuredisk-node-windows-hostprocess.yaml @@ -0,0 +1,108 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node-win + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node-win + template: + metadata: + labels: + app: csi-azuredisk-node-win + spec: + serviceAccountName: csi-azuredisk-node-sa + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + nodeSelector: + kubernetes.io/os: windows + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + initContainers: + - name: init + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.30.0-windows-hp + imagePullPolicy: IfNotPresent + command: + - "powershell.exe" + - "-c" + - "New-Item -ItemType Directory -Path C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\ -Force" + containers: + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.10.0 + imagePullPolicy: IfNotPresent + command: + - "csi-node-driver-registrar.exe" + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--plugin-registration-path=$(PLUGIN_REG_DIR)" + env: + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: 
C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: PLUGIN_REG_DIR + value: C:\\var\\lib\\kubelet\\plugins_registry\\ + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.30.0-windows-hp + imagePullPolicy: IfNotPresent + command: + - "azurediskplugin.exe" + args: + - --v=5 + - --endpoint=$(CSI_ENDPOINT) + - --nodeid=$(KUBE_NODE_NAME) + - --allow-empty-cloud-config=true + - --enable-windows-host-process=true + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi diff --git a/deploy/v1.30.0/csi-azuredisk-node-windows.yaml b/deploy/v1.30.0/csi-azuredisk-node-windows.yaml new file mode 100644 index 000000000..2bbde7ad5 --- /dev/null +++ b/deploy/v1.30.0/csi-azuredisk-node-windows.yaml @@ -0,0 +1,194 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node-win + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node-win + template: + metadata: + labels: + app: csi-azuredisk-node-win + spec: + serviceAccountName: csi-azuredisk-node-sa + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + nodeSelector: + kubernetes.io/os: windows + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + containers: + - name: liveness-probe + volumeMounts: + - mountPath: C:\csi + name: plugin-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.12.0 + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port=29603" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + resources: + limits: + memory: 150Mi + requests: + cpu: 10m + memory: 40Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.10.0 + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 60 + timeoutSeconds: 30 + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: registration-dir + mountPath: C:\registration + resources: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.30.0 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - 
"--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--allow-empty-cloud-config=true" + - "--get-node-info-from-labels=false" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: azure-config + mountPath: C:\k + - name: csi-proxy-fs-pipe-v1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + mountPath: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + mountPath: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-volume-v1beta2 + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi + volumes: + - name: csi-proxy-fs-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1beta2 + - name: registration-dir + hostPath: + path: C:\var\lib\kubelet\plugins_registry\ + type: Directory + - name: kubelet-dir + hostPath: + path: C:\var\lib\kubelet\ + type: Directory + - name: plugin-dir + hostPath: + path: C:\var\lib\kubelet\plugins\disk.csi.azure.com\ + type: DirectoryOrCreate + - name: azure-config + hostPath: + path: C:\k + type: DirectoryOrCreate diff --git a/deploy/v1.30.0/csi-azuredisk-node.yaml b/deploy/v1.30.0/csi-azuredisk-node.yaml new file mode 100644 index 000000000..3732fcfc5 --- /dev/null +++ b/deploy/v1.30.0/csi-azuredisk-node.yaml @@ -0,0 +1,174 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node + template: + metadata: + labels: + app: csi-azuredisk-node + spec: + hostNetwork: true + dnsPolicy: Default + serviceAccountName: csi-azuredisk-node-sa + nodeSelector: + kubernetes.io/os: linux + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - operator: "Exists" + containers: + - name: liveness-probe + 
volumeMounts: + - mountPath: /csi + name: socket-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.12.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29603 + - --v=2 + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.10.0 + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/disk.csi.azure.com/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.30.0 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--enable-perf-optimization=true" + - "--allow-empty-cloud-config=true" + - "--get-node-info-from-labels=false" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /dev + name: device-dir + - mountPath: /sys/bus/scsi/devices + name: sys-devices-dir + - mountPath: /sys/class/ + name: sys-class + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/disk.csi.azure.com + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: /dev + type: Directory + name: device-dir + - hostPath: + path: /sys/bus/scsi/devices + type: Directory + name: sys-devices-dir + - hostPath: + path: /sys/class/ + type: Directory + name: sys-class +--- diff --git a/deploy/v1.30.0/csi-snapshot-controller.yaml b/deploy/v1.30.0/csi-snapshot-controller.yaml new file mode 100644 index 000000000..4fc5c7945 --- /dev/null +++ b/deploy/v1.30.0/csi-snapshot-controller.yaml @@ -0,0 +1,58 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-snapshot-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-snapshot-controller + # the snapshot controller won't be marked as ready if the v1 CRDs are unavailable + # in #504 the snapshot-controller will exit after around 7.5 seconds if it + # can't find the v1 CRDs so 
this value should be greater than that + minReadySeconds: 15 + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: csi-snapshot-controller + spec: + serviceAccountName: csi-snapshot-controller-sa + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Equal" + value: "true" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Equal" + value: "true" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Equal" + value: "true" + effect: "NoSchedule" + containers: + - name: csi-snapshot-controller + image: mcr.microsoft.com/oss/kubernetes-csi/snapshot-controller:v6.3.3 + args: + - "--v=2" + - "--leader-election=true" + - "--leader-election-namespace=kube-system" + resources: + limits: + memory: 300Mi + requests: + cpu: 10m + memory: 20Mi diff --git a/deploy/v1.30.0/rbac-csi-azuredisk-controller.yaml b/deploy/v1.30.0/rbac-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..0e59ad15b --- /dev/null +++ b/deploy/v1.30.0/rbac-csi-azuredisk-controller.yaml @@ -0,0 +1,196 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-controller-sa + namespace: kube-system +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", 
"watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-attacher-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-snapshotter-role +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-snapshotter-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-resizer-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-resizer-role +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-controller-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.30.0/rbac-csi-azuredisk-node.yaml b/deploy/v1.30.0/rbac-csi-azuredisk-node.yaml new file mode 100644 index 000000000..c77181ee1 --- /dev/null +++ b/deploy/v1.30.0/rbac-csi-azuredisk-node.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-node-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: 
rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-node-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.30.0/rbac-csi-snapshot-controller.yaml b/deploy/v1.30.0/rbac-csi-snapshot-controller.yaml new file mode 100644 index 000000000..03af76542 --- /dev/null +++ b/deploy/v1.30.0/rbac-csi-snapshot-controller.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-snapshot-controller-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-binding +subjects: + - kind: ServiceAccount + name: csi-snapshot-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-role +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-binding +subjects: + - kind: ServiceAccount + name: csi-snapshot-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-leaderelection-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.30.1/crd-csi-snapshot.yaml b/deploy/v1.30.1/crd-csi-snapshot.yaml new file mode 100644 index 000000000..d4b90b266 --- /dev/null +++ b/deploy/v1.30.1/crd-csi-snapshot.yaml @@ -0,0 +1,838 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + 
names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + shortNames: + - vs + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of + the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing + VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from + this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot + object intends to bind to. Please note that verification of binding actually + requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure + both are pointing at each other. Binding MUST be verified prior to usage of + this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying + storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object representing the volume from which + a snapshot should be created. This PVC is assumed to be in the + same namespace as the VolumeSnapshot object. This field should + be set if the snapshot does not exists, and needs to be created. + This field is immutable. 
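+                  # Hedged usage sketch (object and class names below are placeholders, not defined
+                  # by this CRD): a minimal dynamically provisioned snapshot of a PVC would look like
+                  #   apiVersion: snapshot.storage.k8s.io/v1
+                  #   kind: VolumeSnapshot
+                  #   metadata:
+                  #     name: my-snapshot
+                  #   spec:
+                  #     volumeSnapshotClassName: csi-azuredisk-vsc   # placeholder class name
+                  #     source:
+                  #       persistentVolumeClaimName: my-pvc          # placeholder PVC name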
+ type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a + pre-existing VolumeSnapshotContent object representing an existing + volume snapshot. This field should be set if the snapshot already + exists and only needs a representation in Kubernetes. This field + is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. VolumeSnapshotClassName may be + left nil to indicate that the default SnapshotClass should be used. + A given cluster may have multiple default Volume SnapshotClasses: + one default per CSI Driver. If a VolumeSnapshot does not specify + a SnapshotClass, VolumeSnapshotSource will be checked to figure + out what the associated CSI Driver is, and the default VolumeSnapshotClass + associated with that CSI Driver will be used. If more than one VolumeSnapshotClass + exist for a given CSI Driver and more than one have been marked + as default, CreateSnapshot will fail and generate an event. Empty + string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. + Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent + objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent + object to which this VolumeSnapshot object intends to bind to. If + not specified, it indicates that the VolumeSnapshot object has not + been successfully bound to a VolumeSnapshotContent object yet. NOTE: + To avoid possible security issues, consumers must verify binding + between VolumeSnapshot and VolumeSnapshotContent objects is successful + (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the snapshot controller + with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it may indicate + that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. The snapshot controller will keep retrying when an error + occurs during the snapshot creation. Upon success, this error field + will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' 
+ type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the snapshot controller with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required + to create a volume from this snapshot. In dynamic snapshot creation + case, this field will be filled in by the snapshot controller with + the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. + For a pre-existing snapshot, this field will be filled with the + "size_bytes" value returned from the CSI "ListSnapshots" gRPC call + if the driver supports it. When restoring a volume from this snapshot, + the size of the volume MUST NOT be smaller than the restoreSize + if it is specified, otherwise the restoration will fail. If not + specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. 
+ deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. 
If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. 
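+                # Hedged note: the value is a Kubernetes resource-quantity string (for example "5Gi"
+                # or a plain integer byte count), constrained by the pattern below.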
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + shortNames: + - vsclass + - vsclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the + VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent + created through the VolumeSnapshotClass should be deleted when its bound + VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". + "Retain" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. + Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. 
+ jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: false + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + shortNames: + - vsc + - vscs + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. 
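As an aside, the VolumeSnapshotClass schema defined above boils down to two required fields. A minimal sketch, wired to the disk.csi.azure.com driver that the rest of this diff deploys, might look like the following; the class name is hypothetical and the optional, driver-specific parameters map is omitted.

```yaml
# Minimal VolumeSnapshotClass sketch; driver and deletionPolicy are the only required fields.
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: csi-azuredisk-vsc        # hypothetical name
driver: disk.csi.azure.com       # must match the CSI driver name
deletionPolicy: Delete           # "Delete" removes the backing snapshot with the content object; "Retain" keeps it
```

A VolumeSnapshot then selects this class via spec.volumeSnapshotClassName, as in the earlier sketch.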
+ jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical + snapshot on the underlying storage system should be deleted when its bound + VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on + the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. + "Delete" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are deleted. For dynamically provisioned + snapshots, this field will automatically be filled in by the CSI + snapshotter sidecar with the "DeletionPolicy" field defined in the + corresponding VolumeSnapshotClass. For pre-existing snapshots, users + MUST specify this field when creating the VolumeSnapshotContent + object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be + the same as the name returned by the CSI GetPluginName() call for + that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) + dynamically provisioned or already exists, and just requires a Kubernetes + object representation. 
This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of + a pre-existing snapshot on the underlying storage system for + which a Kubernetes object representation was (or should be) + created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the + volume from which a snapshot should be dynamically taken from. + This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + sourceVolumeMode: + description: SourceVolumeMode is the mode of the volume whose snapshot + is taken. Can be either “Filesystem” or “Block”. If not specified, + it indicates the source volume's mode is unknown. This field is + immutable. This field is an alpha field. + type: string + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot + was (or will be) created. Note that after provisioning, the VolumeSnapshotClass + may be deleted or recreated with different set of values, and as + such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object + to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be + provided for binding to happen. This field is immutable after creation. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the CSI snapshotter + sidecar with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it indicates + the creation time is unknown. The format of this field is a Unix + nanoseconds time encoded as an int64. On Unix, the command `date + +%s%N` returns the current time in nanoseconds since 1970-01-01 + 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, + if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the CSI snapshotter sidecar with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be + filled in by the CSI snapshotter sidecar with the "size_bytes" value + returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "size_bytes" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. When restoring a volume from this snapshot, the size of the + volume MUST NOT be smaller than the restoreSize if it is specified, + otherwise the restoration will fail. If not specified, it indicates + that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot + on the underlying storage system. If not specified, it indicates + that dynamic snapshot creation has either failed or it is still + in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. 
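The VolumeSnapshotContent spec described above covers both dynamic and static provisioning; the static case is the less obvious one, so here is a hedged sketch of importing a pre-existing Azure disk snapshot. Every name and namespace is hypothetical, and the snapshotHandle is an ARM resource ID with placeholder segments.

```yaml
# Static provisioning sketch: the content object wraps an existing snapshot, and the
# VolumeSnapshot binds to it by name, satisfying the bidirectional binding rule in
# volumeSnapshotRef above.
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotContent
metadata:
  name: example-imported-content
spec:
  deletionPolicy: Retain           # users MUST set this for pre-existing snapshots
  driver: disk.csi.azure.com
  source:
    snapshotHandle: /subscriptions/<subscription-id>/resourceGroups/<rg>/providers/Microsoft.Compute/snapshots/<snapshot-name>
  volumeSnapshotRef:               # must point back at the VolumeSnapshot below
    name: example-imported-snapshot
    namespace: default
---
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: example-imported-snapshot
  namespace: default
spec:
  source:
    volumeSnapshotContentName: example-imported-content
```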
+ jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. 
+ enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. 
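To make the restoreSize constraint above concrete, this is roughly what restoring from one of these snapshots looks like on the consumer side. The PVC and snapshot names are hypothetical, managed-csi is only an assumed storage class name backed by this driver, and the requested size must be at least the snapshot's reported restoreSize.

```yaml
# Restore sketch: a PVC whose dataSource points at a VolumeSnapshot.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-restored-pvc
  namespace: default
spec:
  storageClassName: managed-csi    # assumed storage class backed by disk.csi.azure.com
  dataSource:
    apiGroup: snapshot.storage.k8s.io
    kind: VolumeSnapshot
    name: example-snapshot         # hypothetical VolumeSnapshot to restore from
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi                # must not be smaller than status.restoreSize
```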
+ type: string + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/v1.30.1/csi-azuredisk-controller.yaml b/deploy/v1.30.1/csi-azuredisk-controller.yaml new file mode 100644 index 000000000..c07dc710d --- /dev/null +++ b/deploy/v1.30.1/csi-azuredisk-controller.yaml @@ -0,0 +1,192 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-azuredisk-controller + template: + metadata: + labels: + app: csi-azuredisk-controller + spec: + hostNetwork: true + serviceAccountName: csi-azuredisk-controller-sa + nodeSelector: + kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + containers: + - name: csi-provisioner + image: mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v4.0.0 + args: + - "--feature-gates=Topology=true,HonorPVReclaimPolicy=true" + - "--csi-address=$(ADDRESS)" + - "--v=2" + - "--timeout=30s" + - "--leader-election" + - "--leader-election-namespace=kube-system" + - "--worker-threads=100" + - "--extra-create-metadata=true" + - "--strict-topology=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-attacher + image: mcr.microsoft.com/oss/kubernetes-csi/csi-attacher:v4.5.0 + args: + - "-v=2" + - "-csi-address=$(ADDRESS)" + - "-timeout=1200s" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - "-worker-threads=1000" + - "-kube-api-qps=200" + - "-kube-api-burst=400" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-snapshotter + image: mcr.microsoft.com/oss/kubernetes-csi/csi-snapshotter:v6.3.3 + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - "--v=2" + - "--timeout=1200s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-resizer + image: mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.9.3 + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - '-handle-volume-inuse-error=false' + - '-feature-gates=RecoverVolumeExpansionFailure=true' + - "-timeout=240s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: liveness-probe + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.12.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --http-endpoint=localhost:29602 + - --v=2 + volumeMounts: + - name: 
socket-dir + mountPath: /csi + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.30.1 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:29604" + - "--user-agent-suffix=OSS-kubectl" + - "--disable-avset-nodes=false" + - "--allow-empty-cloud-config=false" + ports: + - containerPort: 29604 + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + host: localhost + path: /healthz + port: 29602 + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate diff --git a/deploy/v1.30.1/csi-azuredisk-driver.yaml b/deploy/v1.30.1/csi-azuredisk-driver.yaml new file mode 100644 index 000000000..f2f10fdaa --- /dev/null +++ b/deploy/v1.30.1/csi-azuredisk-driver.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: disk.csi.azure.com + annotations: + csiDriver: v1.30.0 + snapshot: v6.2.1 +spec: + attachRequired: true + podInfoOnMount: false + fsGroupPolicy: File diff --git a/deploy/v1.30.1/csi-azuredisk-node-windows-hostprocess.yaml b/deploy/v1.30.1/csi-azuredisk-node-windows-hostprocess.yaml new file mode 100644 index 000000000..105103ef5 --- /dev/null +++ b/deploy/v1.30.1/csi-azuredisk-node-windows-hostprocess.yaml @@ -0,0 +1,108 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node-win + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node-win + template: + metadata: + labels: + app: csi-azuredisk-node-win + spec: + serviceAccountName: csi-azuredisk-node-sa + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + nodeSelector: + kubernetes.io/os: windows + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + initContainers: + - name: init + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.30.1-windows-hp + imagePullPolicy: IfNotPresent + command: + - "powershell.exe" + - "-c" + - "New-Item -ItemType Directory -Path C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\ -Force" + containers: + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.10.0 + imagePullPolicy: IfNotPresent + command: + - "csi-node-driver-registrar.exe" + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--plugin-registration-path=$(PLUGIN_REG_DIR)" + env: + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: 
C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: PLUGIN_REG_DIR + value: C:\\var\\lib\\kubelet\\plugins_registry\\ + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.30.1-windows-hp + imagePullPolicy: IfNotPresent + command: + - "azurediskplugin.exe" + args: + - --v=5 + - --endpoint=$(CSI_ENDPOINT) + - --nodeid=$(KUBE_NODE_NAME) + - --allow-empty-cloud-config=true + - --enable-windows-host-process=true + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi diff --git a/deploy/v1.30.1/csi-azuredisk-node-windows.yaml b/deploy/v1.30.1/csi-azuredisk-node-windows.yaml new file mode 100644 index 000000000..f63a16160 --- /dev/null +++ b/deploy/v1.30.1/csi-azuredisk-node-windows.yaml @@ -0,0 +1,194 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node-win + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node-win + template: + metadata: + labels: + app: csi-azuredisk-node-win + spec: + serviceAccountName: csi-azuredisk-node-sa + tolerations: + - key: "node.kubernetes.io/os" + operator: "Exists" + effect: "NoSchedule" + nodeSelector: + kubernetes.io/os: windows + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + containers: + - name: liveness-probe + volumeMounts: + - mountPath: C:\csi + name: plugin-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.12.0 + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--probe-timeout=3s" + - "--health-port=29603" + - "--v=2" + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + resources: + limits: + memory: 150Mi + requests: + cpu: 10m + memory: 40Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.10.0 + args: + - "--v=2" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 60 + timeoutSeconds: 30 + env: + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: registration-dir + mountPath: C:\registration + resources: + limits: + memory: 150Mi + requests: + cpu: 30m + memory: 40Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.30.1 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - 
"--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--allow-empty-cloud-config=true" + - "--get-node-info-from-labels=false" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path-windows + optional: true + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + volumeMounts: + - name: kubelet-dir + mountPath: "C:\\var\\lib\\kubelet" + - name: plugin-dir + mountPath: C:\csi + - name: azure-config + mountPath: C:\k + - name: csi-proxy-fs-pipe-v1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + mountPath: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + mountPath: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + mountPath: \\.\pipe\csi-proxy-volume-v1beta2 + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 40Mi + volumes: + - name: csi-proxy-fs-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + - name: csi-proxy-disk-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe-v1 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + # these paths are still included for compatibility, they're used + # only if the node has still the beta version of the CSI proxy + - name: csi-proxy-fs-pipe-v1beta1 + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1beta1 + - name: csi-proxy-disk-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-disk-v1beta2 + - name: csi-proxy-volume-pipe-v1beta2 + hostPath: + path: \\.\pipe\csi-proxy-volume-v1beta2 + - name: registration-dir + hostPath: + path: C:\var\lib\kubelet\plugins_registry\ + type: Directory + - name: kubelet-dir + hostPath: + path: C:\var\lib\kubelet\ + type: Directory + - name: plugin-dir + hostPath: + path: C:\var\lib\kubelet\plugins\disk.csi.azure.com\ + type: DirectoryOrCreate + - name: azure-config + hostPath: + path: C:\k + type: DirectoryOrCreate diff --git a/deploy/v1.30.1/csi-azuredisk-node.yaml b/deploy/v1.30.1/csi-azuredisk-node.yaml new file mode 100644 index 000000000..b34f09296 --- /dev/null +++ b/deploy/v1.30.1/csi-azuredisk-node.yaml @@ -0,0 +1,171 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node + template: + metadata: + labels: + app: csi-azuredisk-node + spec: + hostNetwork: true + dnsPolicy: Default + serviceAccountName: csi-azuredisk-node-sa + nodeSelector: + kubernetes.io/os: linux + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - operator: "Exists" + containers: + - name: liveness-probe + 
volumeMounts: + - mountPath: /csi + name: socket-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.12.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --http-endpoint=localhost:29603 + - --v=2 + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.10.0 + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/disk.csi.azure.com/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.30.1 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--enable-perf-optimization=true" + - "--allow-empty-cloud-config=true" + - "--get-node-info-from-labels=false" + livenessProbe: + failureThreshold: 5 + httpGet: + host: localhost + path: /healthz + port: 29603 + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /dev + name: device-dir + - mountPath: /sys/bus/scsi/devices + name: sys-devices-dir + - mountPath: /sys/class/ + name: sys-class + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/disk.csi.azure.com + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: /dev + type: Directory + name: device-dir + - hostPath: + path: /sys/bus/scsi/devices + type: Directory + name: sys-devices-dir + - hostPath: + path: /sys/class/ + type: Directory + name: sys-class +--- diff --git a/deploy/v1.30.1/csi-snapshot-controller.yaml b/deploy/v1.30.1/csi-snapshot-controller.yaml new file mode 100644 index 000000000..4fc5c7945 --- /dev/null +++ b/deploy/v1.30.1/csi-snapshot-controller.yaml @@ -0,0 +1,58 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-snapshot-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-snapshot-controller + # the snapshot controller won't be marked as ready if the v1 CRDs are unavailable + # in #504 the snapshot-controller will exit after around 7.5 seconds if it + # can't find the v1 CRDs so this value should be greater than that + 
minReadySeconds: 15 + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: csi-snapshot-controller + spec: + serviceAccountName: csi-snapshot-controller-sa + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Equal" + value: "true" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Equal" + value: "true" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Equal" + value: "true" + effect: "NoSchedule" + containers: + - name: csi-snapshot-controller + image: mcr.microsoft.com/oss/kubernetes-csi/snapshot-controller:v6.3.3 + args: + - "--v=2" + - "--leader-election=true" + - "--leader-election-namespace=kube-system" + resources: + limits: + memory: 300Mi + requests: + cpu: 10m + memory: 20Mi diff --git a/deploy/v1.30.1/rbac-csi-azuredisk-controller.yaml b/deploy/v1.30.1/rbac-csi-azuredisk-controller.yaml new file mode 100644 index 000000000..0e59ad15b --- /dev/null +++ b/deploy/v1.30.1/rbac-csi-azuredisk-controller.yaml @@ -0,0 +1,196 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-controller-sa + namespace: kube-system +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", 
"patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-attacher-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-snapshotter-role +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-snapshotter-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-resizer-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-resizer-role +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-controller-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.30.1/rbac-csi-azuredisk-node.yaml b/deploy/v1.30.1/rbac-csi-azuredisk-node.yaml new file mode 100644 index 000000000..c77181ee1 --- /dev/null +++ b/deploy/v1.30.1/rbac-csi-azuredisk-node.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-node-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: 
csi-azuredisk-node-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-node-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.30.1/rbac-csi-snapshot-controller.yaml b/deploy/v1.30.1/rbac-csi-snapshot-controller.yaml new file mode 100644 index 000000000..03af76542 --- /dev/null +++ b/deploy/v1.30.1/rbac-csi-snapshot-controller.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-snapshot-controller-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-binding +subjects: + - kind: ServiceAccount + name: csi-snapshot-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-role +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshot-controller-leaderelection-binding +subjects: + - kind: ServiceAccount + name: csi-snapshot-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-snapshot-controller-leaderelection-role + apiGroup: rbac.authorization.k8s.io diff --git a/docs/install-azuredisk-csi-driver.md b/docs/install-azuredisk-csi-driver.md index 91055a1e2..7ea19f1e4 100644 --- a/docs/install-azuredisk-csi-driver.md +++ b/docs/install-azuredisk-csi-driver.md @@ -4,6 +4,6 @@ > - please use helm install method for more customization, e.g. Azure Stack, RedHat OpenShift support. 
> - [install CSI driver master version](./install-csi-driver-master.md)(only for testing purpose) - - [install v1.29.0 CSI driver](./install-csi-driver-v1.29.0.md) - - [install v1.28.3 CSI driver](./install-csi-driver-v1.28.3.md) - - [install v1.27.1 CSI driver](./install-csi-driver-v1.27.1.md) + - [install v1.30.1 CSI driver](./install-csi-driver-v1.30.1.md) + - [install v1.29.2 CSI driver](./install-csi-driver-v1.29.2.md) + - [install v1.28.5 CSI driver](./install-csi-driver-v1.28.5.md) diff --git a/docs/install-csi-driver-v1.26.7.md b/docs/install-csi-driver-v1.26.7.md new file mode 100644 index 000000000..e36f0f086 --- /dev/null +++ b/docs/install-csi-driver-v1.26.7.md @@ -0,0 +1,48 @@ +## Install CSI driver development version on a Kubernetes cluster +If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md). + +### Install by kubectl + - Option#1. remote install +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/v1.26.7/deploy/install-driver.sh | bash -s v1.26.7 snapshot -- +``` + + - Option#2. local install +```console +git clone https://github.com/kubernetes-sigs/azuredisk-csi-driver.git +cd azuredisk-csi-driver +git checkout v1.26.7 +./deploy/install-driver.sh v1.26.7 local +``` + +### Check pods status: + +```console +kubectl -n kube-system get pod -o wide --watch -l app=csi-azuredisk-controller +kubectl -n kube-system get pod -o wide --watch -l app=csi-azuredisk-node +``` + +example output: + +```console +NAME READY STATUS RESTARTS AGE IP NODE +csi-azuredisk-controller-56bfddd689-dh5tk 6/6 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0 +csi-azuredisk-controller-56bfddd689-7s8yg 6/6 Running 0 35s 10.240.0.29 k8s-agentpool-22533604-1 +csi-snapshot-controller-84db6dbbb-stzwr 6/6 Running 0 41s 10.240.0.17 k8s-agentpool-22533604-0 +csi-azuredisk-node-cvgbs 3/3 Running 0 7m4s 10.240.0.35 k8s-agentpool-22533604-1 +csi-azuredisk-node-dr4s4 3/3 Running 0 7m4s 10.240.0.4 k8s-agentpool-22533604-0 +``` + +### clean up CSI driver + - Option#1. remote uninstall +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/v1.26.7/deploy/uninstall-driver.sh | bash -s v1.26.7 -- +``` + + - Option#2. local uninstall +```console +git clone https://github.com/kubernetes-sigs/azuredisk-csi-driver.git +cd azuredisk-csi-driver +git checkout v1.26.7 +./deploy/install-driver.sh v1.26.7 local +``` diff --git a/docs/install-csi-driver-v1.26.8.md b/docs/install-csi-driver-v1.26.8.md new file mode 100644 index 000000000..f0b75782a --- /dev/null +++ b/docs/install-csi-driver-v1.26.8.md @@ -0,0 +1,48 @@ +## Install CSI driver development version on a Kubernetes cluster +If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md). + +### Install by kubectl + - Option#1. remote install +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/v1.26.8/deploy/install-driver.sh | bash -s v1.26.8 snapshot -- +``` + + - Option#2. 
local install +```console +git clone https://github.com/kubernetes-sigs/azuredisk-csi-driver.git +cd azuredisk-csi-driver +git checkout v1.26.8 +./deploy/install-driver.sh v1.26.8 local +``` + +### Check pods status: + +```console +kubectl -n kube-system get pod -o wide --watch -l app=csi-azuredisk-controller +kubectl -n kube-system get pod -o wide --watch -l app=csi-azuredisk-node +``` + +example output: + +```console +NAME READY STATUS RESTARTS AGE IP NODE +csi-azuredisk-controller-56bfddd689-dh5tk 6/6 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0 +csi-azuredisk-controller-56bfddd689-7s8yg 6/6 Running 0 35s 10.240.0.29 k8s-agentpool-22533604-1 +csi-snapshot-controller-84db6dbbb-stzwr 6/6 Running 0 41s 10.240.0.17 k8s-agentpool-22533604-0 +csi-azuredisk-node-cvgbs 3/3 Running 0 7m4s 10.240.0.35 k8s-agentpool-22533604-1 +csi-azuredisk-node-dr4s4 3/3 Running 0 7m4s 10.240.0.4 k8s-agentpool-22533604-0 +``` + +### clean up CSI driver + - Option#1. remote uninstall +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/v1.26.8/deploy/uninstall-driver.sh | bash -s v1.26.8 -- +``` + + - Option#2. local uninstall +```console +git clone https://github.com/kubernetes-sigs/azuredisk-csi-driver.git +cd azuredisk-csi-driver +git checkout v1.26.8 +./deploy/install-driver.sh v1.26.8 local +``` diff --git a/docs/install-csi-driver-v1.28.4.md b/docs/install-csi-driver-v1.28.4.md new file mode 100644 index 000000000..61f2d2442 --- /dev/null +++ b/docs/install-csi-driver-v1.28.4.md @@ -0,0 +1,48 @@ +## Install CSI driver development version on a Kubernetes cluster +If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md). + +### Install by kubectl + - Option#1. remote install +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/v1.28.4/deploy/install-driver.sh | bash -s v1.28.4 snapshot -- +``` + + - Option#2. local install +```console +git clone https://github.com/kubernetes-sigs/azuredisk-csi-driver.git +cd azuredisk-csi-driver +git checkout v1.28.4 +./deploy/install-driver.sh v1.28.4 local +``` + +### Check pods status: + +```console +kubectl -n kube-system get pod -o wide --watch -l app=csi-azuredisk-controller +kubectl -n kube-system get pod -o wide --watch -l app=csi-azuredisk-node +``` + +example output: + +```console +NAME READY STATUS RESTARTS AGE IP NODE +csi-azuredisk-controller-56bfddd689-dh5tk 6/6 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0 +csi-azuredisk-controller-56bfddd689-7s8yg 6/6 Running 0 35s 10.240.0.29 k8s-agentpool-22533604-1 +csi-snapshot-controller-84db6dbbb-stzwr 6/6 Running 0 41s 10.240.0.17 k8s-agentpool-22533604-0 +csi-azuredisk-node-cvgbs 3/3 Running 0 7m4s 10.240.0.35 k8s-agentpool-22533604-1 +csi-azuredisk-node-dr4s4 3/3 Running 0 7m4s 10.240.0.4 k8s-agentpool-22533604-0 +``` + +### clean up CSI driver + - Option#1. remote uninstall +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/v1.28.4/deploy/uninstall-driver.sh | bash -s v1.28.4 -- +``` + + - Option#2. 
local uninstall
+```console
+git clone https://github.com/kubernetes-sigs/azuredisk-csi-driver.git
+cd azuredisk-csi-driver
+git checkout v1.28.4
+./deploy/uninstall-driver.sh v1.28.4 local
+```
diff --git a/docs/install-csi-driver-v1.28.5.md b/docs/install-csi-driver-v1.28.5.md
new file mode 100644
index 000000000..c18160ccf
--- /dev/null
+++ b/docs/install-csi-driver-v1.28.5.md
@@ -0,0 +1,48 @@
+## Install CSI driver development version on a Kubernetes cluster
+If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md).
+
+### Install by kubectl
+ - Option#1. remote install
+```console
+curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/v1.28.5/deploy/install-driver.sh | bash -s v1.28.5 snapshot --
+```
+
+ - Option#2. local install
+```console
+git clone https://github.com/kubernetes-sigs/azuredisk-csi-driver.git
+cd azuredisk-csi-driver
+git checkout v1.28.5
+./deploy/install-driver.sh v1.28.5 local
+```
+
+### Check pods status:
+
+```console
+kubectl -n kube-system get pod -o wide --watch -l app=csi-azuredisk-controller
+kubectl -n kube-system get pod -o wide --watch -l app=csi-azuredisk-node
+```
+
+example output:
+
+```console
+NAME READY STATUS RESTARTS AGE IP NODE
+csi-azuredisk-controller-56bfddd689-dh5tk 6/6 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0
+csi-azuredisk-controller-56bfddd689-7s8yg 6/6 Running 0 35s 10.240.0.29 k8s-agentpool-22533604-1
+csi-snapshot-controller-84db6dbbb-stzwr 6/6 Running 0 41s 10.240.0.17 k8s-agentpool-22533604-0
+csi-azuredisk-node-cvgbs 3/3 Running 0 7m4s 10.240.0.35 k8s-agentpool-22533604-1
+csi-azuredisk-node-dr4s4 3/3 Running 0 7m4s 10.240.0.4 k8s-agentpool-22533604-0
+```
+
+### clean up CSI driver
+ - Option#1. remote uninstall
+```console
+curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/v1.28.5/deploy/uninstall-driver.sh | bash -s v1.28.5 --
+```
+
+ - Option#2. local uninstall
+```console
+git clone https://github.com/kubernetes-sigs/azuredisk-csi-driver.git
+cd azuredisk-csi-driver
+git checkout v1.28.5
+./deploy/uninstall-driver.sh v1.28.5 local
+```
diff --git a/docs/install-csi-driver-v1.29.1.md b/docs/install-csi-driver-v1.29.1.md
new file mode 100644
index 000000000..c6096f214
--- /dev/null
+++ b/docs/install-csi-driver-v1.29.1.md
@@ -0,0 +1,48 @@
+## Install CSI driver development version on a Kubernetes cluster
+If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md).
+
+### Install by kubectl
+ - Option#1. remote install
+```console
+curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/v1.29.1/deploy/install-driver.sh | bash -s v1.29.1 snapshot --
+```
+
+ - Option#2.
local install
+```console
+git clone https://github.com/kubernetes-sigs/azuredisk-csi-driver.git
+cd azuredisk-csi-driver
+git checkout v1.29.1
+./deploy/install-driver.sh v1.29.1 local
+```
+
+### Check pods status:
+
+```console
+kubectl -n kube-system get pod -o wide --watch -l app=csi-azuredisk-controller
+kubectl -n kube-system get pod -o wide --watch -l app=csi-azuredisk-node
+```
+
+example output:
+
+```console
+NAME READY STATUS RESTARTS AGE IP NODE
+csi-azuredisk-controller-56bfddd689-dh5tk 6/6 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0
+csi-azuredisk-controller-56bfddd689-7s8yg 6/6 Running 0 35s 10.240.0.29 k8s-agentpool-22533604-1
+csi-snapshot-controller-84db6dbbb-stzwr 6/6 Running 0 41s 10.240.0.17 k8s-agentpool-22533604-0
+csi-azuredisk-node-cvgbs 3/3 Running 0 7m4s 10.240.0.35 k8s-agentpool-22533604-1
+csi-azuredisk-node-dr4s4 3/3 Running 0 7m4s 10.240.0.4 k8s-agentpool-22533604-0
+```
+
+### clean up CSI driver
+ - Option#1. remote uninstall
+```console
+curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/v1.29.1/deploy/uninstall-driver.sh | bash -s v1.29.1 --
+```
+
+ - Option#2. local uninstall
+```console
+git clone https://github.com/kubernetes-sigs/azuredisk-csi-driver.git
+cd azuredisk-csi-driver
+git checkout v1.29.1
+./deploy/uninstall-driver.sh v1.29.1 local
+```
diff --git a/docs/install-csi-driver-v1.29.2.md b/docs/install-csi-driver-v1.29.2.md
new file mode 100644
index 000000000..9ae890b95
--- /dev/null
+++ b/docs/install-csi-driver-v1.29.2.md
@@ -0,0 +1,48 @@
+## Install CSI driver development version on a Kubernetes cluster
+If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md).
+
+### Install by kubectl
+ - Option#1. remote install
+```console
+curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/v1.29.2/deploy/install-driver.sh | bash -s v1.29.2 snapshot --
+```
+
+ - Option#2. local install
+```console
+git clone https://github.com/kubernetes-sigs/azuredisk-csi-driver.git
+cd azuredisk-csi-driver
+git checkout v1.29.2
+./deploy/install-driver.sh v1.29.2 local
+```
+
+### Check pods status:
+
+```console
+kubectl -n kube-system get pod -o wide --watch -l app=csi-azuredisk-controller
+kubectl -n kube-system get pod -o wide --watch -l app=csi-azuredisk-node
+```
+
+example output:
+
+```console
+NAME READY STATUS RESTARTS AGE IP NODE
+csi-azuredisk-controller-56bfddd689-dh5tk 6/6 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0
+csi-azuredisk-controller-56bfddd689-7s8yg 6/6 Running 0 35s 10.240.0.29 k8s-agentpool-22533604-1
+csi-snapshot-controller-84db6dbbb-stzwr 6/6 Running 0 41s 10.240.0.17 k8s-agentpool-22533604-0
+csi-azuredisk-node-cvgbs 3/3 Running 0 7m4s 10.240.0.35 k8s-agentpool-22533604-1
+csi-azuredisk-node-dr4s4 3/3 Running 0 7m4s 10.240.0.4 k8s-agentpool-22533604-0
+```
+
+### clean up CSI driver
+ - Option#1. remote uninstall
+```console
+curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/v1.29.2/deploy/uninstall-driver.sh | bash -s v1.29.2 --
+```
+
+ - Option#2.
local uninstall
+```console
+git clone https://github.com/kubernetes-sigs/azuredisk-csi-driver.git
+cd azuredisk-csi-driver
+git checkout v1.29.2
+./deploy/uninstall-driver.sh v1.29.2 local
+```
diff --git a/docs/install-csi-driver-v1.30.0.md b/docs/install-csi-driver-v1.30.0.md
new file mode 100644
index 000000000..049560141
--- /dev/null
+++ b/docs/install-csi-driver-v1.30.0.md
@@ -0,0 +1,48 @@
+## Install CSI driver development version on a Kubernetes cluster
+If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md).
+
+### Install by kubectl
+ - Option#1. remote install
+```console
+curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/v1.30.0/deploy/install-driver.sh | bash -s v1.30.0 snapshot --
+```
+
+ - Option#2. local install
+```console
+git clone https://github.com/kubernetes-sigs/azuredisk-csi-driver.git
+cd azuredisk-csi-driver
+git checkout v1.30.0
+./deploy/install-driver.sh v1.30.0 local
+```
+
+### Check pods status:
+
+```console
+kubectl -n kube-system get pod -o wide --watch -l app=csi-azuredisk-controller
+kubectl -n kube-system get pod -o wide --watch -l app=csi-azuredisk-node
+```
+
+example output:
+
+```console
+NAME READY STATUS RESTARTS AGE IP NODE
+csi-azuredisk-controller-56bfddd689-dh5tk 6/6 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0
+csi-azuredisk-controller-56bfddd689-7s8yg 6/6 Running 0 35s 10.240.0.29 k8s-agentpool-22533604-1
+csi-snapshot-controller-84db6dbbb-stzwr 6/6 Running 0 41s 10.240.0.17 k8s-agentpool-22533604-0
+csi-azuredisk-node-cvgbs 3/3 Running 0 7m4s 10.240.0.35 k8s-agentpool-22533604-1
+csi-azuredisk-node-dr4s4 3/3 Running 0 7m4s 10.240.0.4 k8s-agentpool-22533604-0
+```
+
+### clean up CSI driver
+ - Option#1. remote uninstall
+```console
+curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/v1.30.0/deploy/uninstall-driver.sh | bash -s v1.30.0 --
+```
+
+ - Option#2. local uninstall
+```console
+git clone https://github.com/kubernetes-sigs/azuredisk-csi-driver.git
+cd azuredisk-csi-driver
+git checkout v1.30.0
+./deploy/uninstall-driver.sh v1.30.0 local
+```
diff --git a/docs/install-csi-driver-v1.30.1.md b/docs/install-csi-driver-v1.30.1.md
new file mode 100644
index 000000000..b97e9d6f5
--- /dev/null
+++ b/docs/install-csi-driver-v1.30.1.md
@@ -0,0 +1,48 @@
+## Install CSI driver development version on a Kubernetes cluster
+If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md).
+
+### Install by kubectl
+ - Option#1. remote install
+```console
+curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/v1.30.1/deploy/install-driver.sh | bash -s v1.30.1 snapshot --
+```
+
+ - Option#2.
local install
+```console
+git clone https://github.com/kubernetes-sigs/azuredisk-csi-driver.git
+cd azuredisk-csi-driver
+git checkout v1.30.1
+./deploy/install-driver.sh v1.30.1 local
+```
+
+### Check pods status:
+
+```console
+kubectl -n kube-system get pod -o wide --watch -l app=csi-azuredisk-controller
+kubectl -n kube-system get pod -o wide --watch -l app=csi-azuredisk-node
+```
+
+example output:
+
+```console
+NAME READY STATUS RESTARTS AGE IP NODE
+csi-azuredisk-controller-56bfddd689-dh5tk 6/6 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0
+csi-azuredisk-controller-56bfddd689-7s8yg 6/6 Running 0 35s 10.240.0.29 k8s-agentpool-22533604-1
+csi-snapshot-controller-84db6dbbb-stzwr 6/6 Running 0 41s 10.240.0.17 k8s-agentpool-22533604-0
+csi-azuredisk-node-cvgbs 3/3 Running 0 7m4s 10.240.0.35 k8s-agentpool-22533604-1
+csi-azuredisk-node-dr4s4 3/3 Running 0 7m4s 10.240.0.4 k8s-agentpool-22533604-0
+```
+
+### clean up CSI driver
+ - Option#1. remote uninstall
+```console
+curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/v1.30.1/deploy/uninstall-driver.sh | bash -s v1.30.1 --
+```
+
+ - Option#2. local uninstall
+```console
+git clone https://github.com/kubernetes-sigs/azuredisk-csi-driver.git
+cd azuredisk-csi-driver
+git checkout v1.30.1
+./deploy/uninstall-driver.sh v1.30.1 local
+```
diff --git a/go.mod b/go.mod
index ef2f424a3..e49d2da69 100644
--- a/go.mod
+++ b/go.mod
@@ -1,63 +1,70 @@
 module sigs.k8s.io/azuredisk-csi-driver
 
-go 1.21
-
-toolchain go1.21.1
+go 1.22
 
 require (
 	github.com/Azure/azure-sdk-for-go v68.0.0+incompatible
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.2
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.0
+	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0
+	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0
+	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0
 	github.com/Azure/go-autorest/autorest v0.11.29
-	github.com/Azure/go-autorest/autorest/date v0.3.0
-	github.com/container-storage-interface/spec v1.8.0
-	github.com/golang/mock v1.6.0
-	github.com/golang/protobuf v1.5.3
-	github.com/jongio/azidext/go/azidext v0.5.0
-	github.com/kubernetes-csi/csi-lib-utils v0.15.0
+	github.com/Azure/go-autorest/autorest/mocks v0.4.2
+	github.com/container-storage-interface/spec v1.9.0
+	github.com/golang/protobuf v1.5.4
+	github.com/kubernetes-csi/csi-lib-utils v0.17.0
 	github.com/kubernetes-csi/csi-proxy/client v1.1.3
 	github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
-	github.com/onsi/ginkgo/v2 v2.12.0
-	github.com/onsi/gomega v1.27.10
-	github.com/pborman/uuid v1.2.1
+	github.com/onsi/ginkgo/v2 v2.17.1
+	github.com/onsi/gomega v1.32.0
 	github.com/pelletier/go-toml v1.9.5
-	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.41.0
-	go.opentelemetry.io/otel v1.15.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0
-	go.opentelemetry.io/otel/sdk v1.10.0
-	golang.org/x/net v0.15.0
-	google.golang.org/grpc v1.58.0
-	google.golang.org/protobuf v1.31.0
-	k8s.io/api v0.28.1
-	k8s.io/apimachinery v0.28.1
-	k8s.io/client-go v0.28.1
-	k8s.io/cloud-provider v0.28.1
-	k8s.io/component-base v0.28.1
-	k8s.io/klog/v2 v2.100.1
-	k8s.io/kubernetes v1.28.1
-	k8s.io/mount-utils v0.28.1
+	github.com/stretchr/testify v1.9.0
+
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 + go.opentelemetry.io/otel v1.23.1 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.23.1 + go.opentelemetry.io/otel/sdk v1.23.1 + go.uber.org/mock v0.4.0 + golang.org/x/net v0.23.0 + golang.org/x/sync v0.6.0 + google.golang.org/grpc v1.61.1 + google.golang.org/protobuf v1.33.0 + k8s.io/api v0.29.3 + k8s.io/apimachinery v0.29.3 + k8s.io/client-go v0.29.3 + k8s.io/cloud-provider v0.29.3 + k8s.io/component-base v0.29.3 + k8s.io/klog/v2 v2.120.1 + k8s.io/kubernetes v1.29.2 + k8s.io/mount-utils v0.29.2 k8s.io/pod-security-admission v0.0.0 - k8s.io/utils v0.0.0-20230726121419-3b25d923346b - sigs.k8s.io/cloud-provider-azure v1.27.1-0.20230825065334-8b1cf948b7ed - sigs.k8s.io/yaml v1.3.0 + k8s.io/utils v0.0.0-20240102154912-e7106e64919e + sigs.k8s.io/cloud-provider-azure v1.29.1-0.20240401064333-15fadde40d38 + sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.7 + sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.2 + sigs.k8s.io/yaml v1.4.0 ) -require github.com/edgelesssys/constellation/v2 v2.11.1-0.20231005092022-f69ae2612236 +require github.com/edgelesssys/constellation/v2 v2.16.4 replace github.com/martinjungblut/go-cryptsetup => github.com/daniel-weisse/go-cryptsetup v0.0.0-20230705150314-d8c07bd1723c require ( - github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect - github.com/Azure/go-autorest/autorest/mocks v0.4.2 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect @@ -68,125 +75,123 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/docker/distribution v2.8.2+incompatible // indirect - github.com/emicklei/go-restful/v3 v3.10.1 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + 
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/distribution/reference v0.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.2 // indirect + github.com/evanphx/json-patch v5.9.0+incompatible // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.4 // indirect + github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/jsonreference v0.20.4 // indirect + github.com/go-openapi/swag v0.22.9 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang-jwt/jwt/v5 v5.0.0 // indirect + github.com/golang-jwt/jwt/v5 v5.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/cel-go v0.16.0 // indirect + github.com/google/cel-go v0.17.7 // indirect github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20221103000818-d260c55eee4c // indirect - github.com/google/uuid v1.3.1 // indirect + github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.1 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect - github.com/imdario/mergo v0.3.13 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/martinjungblut/go-cryptsetup v0.0.0-20220520180014-fd0874fd07a6 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/moby/spdystream v0.2.0 // indirect - github.com/moby/sys/mountinfo v0.6.2 // indirect + github.com/moby/sys/mountinfo v0.7.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/selinux v1.10.0 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/opencontainers/selinux v1.11.0 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.16.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect - github.com/spf13/cobra v1.7.0 // indirect + github.com/prometheus/client_golang v1.18.0 // indirect + github.com/prometheus/client_model v0.6.0 
// indirect + github.com/prometheus/common v0.47.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/spf13/cobra v1.8.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect - go.etcd.io/etcd/api/v3 v3.5.9 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect - go.etcd.io/etcd/client/v3 v3.5.9 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect - go.opentelemetry.io/otel/metric v0.38.0 // indirect - go.opentelemetry.io/otel/trace v1.15.0 // indirect - go.opentelemetry.io/proto/otlp v0.19.0 // indirect + go.etcd.io/etcd/api/v3 v3.5.12 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.12 // indirect + go.etcd.io/etcd/client/v3 v3.5.12 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect + go.opentelemetry.io/otel/metric v1.23.1 // indirect + go.opentelemetry.io/otel/trace v1.23.1 // indirect + go.opentelemetry.io/proto/otlp v1.1.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.26.0 // indirect - golang.org/x/crypto v0.13.0 // indirect - golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect - golang.org/x/mod v0.12.0 // indirect - golang.org/x/oauth2 v0.10.0 // indirect - golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.12.0 // indirect - golang.org/x/term v0.12.0 // indirect - golang.org/x/text v0.13.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.13.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/crypto v0.21.0 // indirect + golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect + golang.org/x/mod v0.15.0 // indirect + golang.org/x/oauth2 v0.17.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.18.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20240221002015-b0ce06bbee7c // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.27.3 // indirect - k8s.io/apiserver v0.28.1 // indirect - k8s.io/component-helpers v0.28.1 // indirect - k8s.io/controller-manager v0.28.1 // indirect - k8s.io/kms v0.28.1 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect - k8s.io/kubectl v0.27.2 // indirect - k8s.io/kubelet v0.28.0 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect + k8s.io/apiextensions-apiserver v0.29.0 // indirect + k8s.io/apiserver v0.29.3 // indirect + k8s.io/component-helpers v0.29.3 // indirect + k8s.io/controller-manager v0.29.3 // indirect + k8s.io/kms v0.29.3 // indirect + k8s.io/kube-openapi v0.0.0-20240220201932-37d671a357a5 // indirect + k8s.io/kubectl 
v0.29.0 // indirect + k8s.io/kubelet v0.29.3 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) replace ( - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 - go.opentelemetry.io/otel => go.opentelemetry.io/otel v1.10.0 - go.opentelemetry.io/otel/metric => go.opentelemetry.io/otel/metric v0.31.0 - go.opentelemetry.io/otel/trace => go.opentelemetry.io/otel/trace v1.10.0 - k8s.io/api => k8s.io/api v0.28.1 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.1 - k8s.io/apimachinery => k8s.io/apimachinery v0.28.1 - k8s.io/apiserver => k8s.io/apiserver v0.28.1 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.1 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.1 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.1 - k8s.io/code-generator => k8s.io/code-generator v0.28.1 - k8s.io/component-base => k8s.io/component-base v0.28.1 - k8s.io/component-helpers => k8s.io/component-helpers v0.28.1 - k8s.io/controller-manager => k8s.io/controller-manager v0.28.1 - k8s.io/cri-api => k8s.io/cri-api v0.28.1 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.1 - k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.1 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.1 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.1 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.1 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.1 - k8s.io/kubectl => k8s.io/kubectl v0.28.1 - k8s.io/kubelet => k8s.io/kubelet v0.28.1 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.1 - k8s.io/metrics => k8s.io/metrics v0.28.1 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.1 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.1 - k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.28.1 - k8s.io/sample-controller => k8s.io/sample-controller v0.28.1 + k8s.io/api => k8s.io/api v0.29.0 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.0 + k8s.io/apimachinery => k8s.io/apimachinery v0.29.0 + k8s.io/apiserver => k8s.io/apiserver v0.29.0 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.0 + k8s.io/client-go => k8s.io/client-go v0.29.0 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.0 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.0 + k8s.io/code-generator => k8s.io/code-generator v0.29.0 + k8s.io/component-base => k8s.io/component-base v0.29.0 + k8s.io/component-helpers => k8s.io/component-helpers v0.29.0 + k8s.io/controller-manager => k8s.io/controller-manager v0.29.0 + k8s.io/cri-api => k8s.io/cri-api v0.29.0 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.0 + k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.0 + k8s.io/endpointslice => k8s.io/endpointslice v0.29.0 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.0 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.0 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.0 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.0 + k8s.io/kubectl => k8s.io/kubectl v0.29.0 + k8s.io/kubelet => k8s.io/kubelet v0.29.0 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.0 + 
k8s.io/metrics => k8s.io/metrics v0.29.0 + k8s.io/mount-utils => k8s.io/mount-utils v0.29.0 + k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.29.0 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.0 + k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.29.0 + k8s.io/sample-controller => k8s.io/sample-controller v0.29.0 ) diff --git a/go.sum b/go.sum index 636ebc3b4..d68041cfe 100644 --- a/go.sum +++ b/go.sum @@ -3,10 +3,10 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= @@ -14,55 +14,630 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod 
h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= +cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= 
+cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod 
h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.21.0 h1:JNBsyXVoOoNJtTQcnEY5uYpZIbeCTYIeDe0Xh1bySMk= -cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= 
+cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= +cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= 
+cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= 
+cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/domains 
v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= 
+cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/language v1.4.0/go.mod 
h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= 
+cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= 
+cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= 
+cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= 
+cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= +cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod 
h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= +cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= 
+cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= +cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod 
h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.2 h1:t5+QXLCK9SVi0PPdaY0PrFvYUo24KwA0QwxnaHRSVd4= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.2/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1 h1:LNHhpdK7hzUcx/k1LIcuh5k7k1LGIWLQfCjaneSj7Fc= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1/go.mod h1:uE9zaUfEQT/nbQjVi2IblCG9iaLtZsuYZ8ne+PuQ02M= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.0 h1:U/kwEXj0Y+1REAkV4kV8VO1CsEp8tSaQDG/7qC5XuqQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.0/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 h1:DWlwvVV5r/Wy1561nZ3wrpI1/vDIBRY/Wd1HWaRBZWA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0/go.mod h1:E7ltexgRDmeJ0fJWv0D/HLwY2xbDdN+uv+X2uZtOx3w= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0 h1:0nGmzwBv5ougvzfGPCO2ljFRHvun57KpNrVCMrlk0ns= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0/go.mod h1:gYq8wyDgv6JLhGbAU6gg8amCPgQWRE+aCvrV2gyzdfs= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 h1:HlZMUZW8S4P9oob1nCHxCCKrytxyLc+24nUJGssoEto= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0/go.mod h1:StGsLbuJh06Bd8IBfnAlIFV3fLb+gkczONWf15hpX2E= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 
h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0/go.mod h1:mLfWfj8v3jfWKsL9G4eoBoXVcsqcIUTapmdKy7uGOp0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.2.0 h1:9Eih8XcEeQnFD0ntMlUDleKMzfeCeUfa+VbnDCI4AZs= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.2.0/go.mod h1:wGPyTi+aURdqPAGMZDQqnNs9IrShADF8w2WZb6bKeq0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= @@ -70,16 +645,15 @@ github.com/Azure/go-autorest/autorest/to v0.4.0 
h1:oXVqrxakqqV1UZdSazDOPOLvOIz+X github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk= -github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= @@ -87,9 +661,17 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0 github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= +github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= +github.com/apache/thrift v0.16.0/go.mod 
h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -99,9 +681,13 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= @@ -111,98 +697,127 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/container-storage-interface/spec v1.8.0 h1:D0vhF3PLIZwlwZEf2eNbpujGCNwspwTYf2idJRJx4xI= -github.com/container-storage-interface/spec v1.8.0/go.mod 
h1:ROLik+GhPslwwWRNFF1KasPzroNARibH2rfz1rkg4H0= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= +github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/container-storage-interface/spec v1.9.0 h1:zKtX4STsq31Knz3gciCYCi1SXtO2HJDecIjDVboYavY= +github.com/container-storage-interface/spec v1.9.0/go.mod h1:ZfDu+3ZRyeVqxZM0Ds19MVLkN2d1XJ5MAfi1L3VjlT0= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/daniel-weisse/go-cryptsetup v0.0.0-20230705150314-d8c07bd1723c h1:ToajP6trZoiqlZ3Z4uoG1P02/wtqSw1AcowOXOYjATk= github.com/daniel-weisse/go-cryptsetup v0.0.0-20230705150314-d8c07bd1723c/go.mod h1:gZoZ0+POlM1ge/VUxWpMmZVNPzzMJ7l436CgkQ5+qzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= -github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/edgelesssys/constellation/v2 v2.11.1-0.20231005092022-f69ae2612236 h1:+elXrNwMmjDGukA/n3/utCBft3uNgGCzFIoEB6Fkvzw= -github.com/edgelesssys/constellation/v2 v2.11.1-0.20231005092022-f69ae2612236/go.mod h1:x+/AAni3uAegCLMWRDE0278Zi/eAFzBnRfxICiLP8LE= +github.com/edgelesssys/constellation/v2 
v2.16.4 h1:+TX3rYIAJVz3eVMDMDPkn0wfHGBCqEaRszEq//0CjEA= +github.com/edgelesssys/constellation/v2 v2.16.4/go.mod h1:vgzs/an1+Rup95BUCorTyg9oW1Gjse4iSM2/C+Gk324= github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= -github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU= +github.com/emicklei/go-restful/v3 v3.11.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= 
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= -github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.19.6/go.mod 
h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= +github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= -github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= -github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= +github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -216,7 +831,7 @@ github.com/golang/mock v1.4.0/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -233,15 +848,21 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.16.0 h1:DG9YQ8nFCFXAs/FDDwBxmL1tpKNrdlGUM9U3537bX/Y= -github.com/google/cel-go v0.16.0/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= +github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -251,17 +872,25 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -269,22 +898,50 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20221103000818-d260c55eee4c h1:lvddKcYTQ545ADhBujtIJmqQrZBDsGo7XIMbAQe/sNY= -github.com/google/pprof v0.0.0-20221103000818-d260c55eee4c/go.mod 
h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= +github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw= +github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing 
v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= @@ -292,59 +949,71 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= -github.com/jongio/azidext/go/azidext v0.5.0 h1:uPInXD4NZ3J0k79FPwIA0YXknFn+WcqZqSgs3/jPgvQ= -github.com/jongio/azidext/go/azidext v0.5.0/go.mod 
h1:TVRX/hJhzbsCKaOIzicH6a8IvOH0hpjWk/JwZZgtXeU= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kubernetes-csi/csi-lib-utils v0.15.0 h1:YTMO6WilRUmjGh5/73kF4KjNcXev+V37O4bx8Uoxy5A= -github.com/kubernetes-csi/csi-lib-utils v0.15.0/go.mod h1:fsoR7g1fOfl1z0WDpA1WvWPtt4oVvgzChgSUgR3JWDw= +github.com/kubernetes-csi/csi-lib-utils v0.17.0 h1:xEpJ3WYgMyyYF6fvcKHh4cDRtknuTkBS9rG8bYoLTCU= +github.com/kubernetes-csi/csi-lib-utils v0.17.0/go.mod h1:2Ba5/aQgUjbpqyC2uCcFwMF3rnPVs5jhZXm8jAzcT9Q= github.com/kubernetes-csi/csi-proxy/client v1.1.3 h1:FdGU7NtxGhQX2wTfnuscmThG920hq0OaVVpuJW9t2k0= github.com/kubernetes-csi/csi-proxy/client v1.1.3/go.mod h1:SfK4HVKQdMH5KrffivddAWgX5hl3P5KmnuOTBbDNboU= github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 h1:nHHjmvjitIiyPlUHk/ofpgvBcNcawJLtf4PYHORLjAA= github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lyft/protoc-gen-star 
v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= -github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= +github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -353,6 +1022,7 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -370,9 +1040,12 @@ github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJ github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk= github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo= github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/ginkgo/v2 v2.12.0 h1:UIVDowFPwpg6yMUpPjGkYvf06K3RAiJXUhCxEwQVHRI= -github.com/onsi/ginkgo/v2 v2.12.0/go.mod h1:ZNEzXISYlqpb8S36iN71ifqLi3vVD1rVJGvWRCJOUpQ= +github.com/onsi/ginkgo/v2 
v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= +github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0= +github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= @@ -387,49 +1060,67 @@ github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfad github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw= github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ= github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= -github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4= +github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU= -github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= -github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= 
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/common v0.47.0 h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpjL1k= +github.com/prometheus/common v0.47.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= @@ -448,8 +1139,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= @@ -461,81 +1153,111 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= -go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.etcd.io/etcd/api/v3 
v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs= -go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= -go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE= -go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= -go.etcd.io/etcd/client/v2 v2.305.9 h1:YZ2OLi0OvR0H75AcgSUajjd5uqKDKocQUqROTG11jIo= -go.etcd.io/etcd/client/v2 v2.305.9/go.mod h1:0NBdNx9wbxtEQLwAQtrDHwx58m02vXpDcgSYI2seohQ= -go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E= -go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA= -go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ= -go.etcd.io/etcd/pkg/v3 v3.5.9/go.mod h1:BZl0SAShQFk0IpLWR78T/+pyt8AruMHhTNNX73hkNVY= -go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI= -go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg= -go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= -go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= +go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/etcd/api/v3 v3.5.12 h1:W4sw5ZoU2Juc9gBWuLk5U6fHfNVyY1WC5g9uiXZio/c= +go.etcd.io/etcd/api/v3 v3.5.12/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4= +go.etcd.io/etcd/client/pkg/v3 v3.5.12 h1:EYDL6pWwyOsylrQyLp2w+HkQ46ATiOvoEdMarindU2A= +go.etcd.io/etcd/client/pkg/v3 v3.5.12/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4= +go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4= +go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA= +go.etcd.io/etcd/client/v3 v3.5.12 h1:v5lCPXn1pf1Uu3M4laUE2hp/geOTc5uPcYYsNe1lDxg= +go.etcd.io/etcd/client/v3 v3.5.12/go.mod h1:tSbBCakoWmmddL+BKVAJHa9km+O/E+bumDe9mSbPiqw= +go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM= +go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs= +go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA= +go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc= +go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg= +go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= 
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= -go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA= +go.opentelemetry.io/otel v1.23.1 h1:Za4UzOqJYS+MUczKI320AtqZHZb7EqxO00jAHE0jmQY= +go.opentelemetry.io/otel v1.23.1/go.mod h1:Td0134eafDLcTS4y+zQ26GE8u3dEuRBiBCTUIRHaikA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1 h1:o8iWeVFa1BcLtVEV0LzrCxV2/55tB3xLxADr6Kyoey4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1/go.mod h1:SEVfdK4IoBnbT2FXNM/k8yC08MrfbhWk3U4ljM8B3HE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.23.1 h1:p3A5+f5l9e/kuEBwLOrnpkIDHQFlHmbiVxMURWRK6gQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.23.1/go.mod h1:OClrnXUjBqQbInvjJFjYSnMxBSCXBF8r3b34WqjiIrQ= +go.opentelemetry.io/otel/metric v1.23.1 h1:PQJmqJ9u2QaJLBOELl1cxIdPcpbwzbkjfEyelTl2rlo= +go.opentelemetry.io/otel/metric v1.23.1/go.mod h1:mpG2QPlAfnK8yNhNJAxDZruU9Y1/HubbC+KyH8FaCWI= +go.opentelemetry.io/otel/sdk v1.23.1 h1:O7JmZw0h76if63LQdsBMKQDWNb5oEcOThG9IrxscV+E= +go.opentelemetry.io/otel/sdk v1.23.1/go.mod h1:LzdEVR5am1uKOOwfBWFef2DCi1nu3SA8XQxx2IerWFk= +go.opentelemetry.io/otel/trace v1.23.1 h1:4LrmmEd8AU2rFvU1zegmvqW7+kWarxtNOPyeL6HmYY8= +go.opentelemetry.io/otel/trace v1.23.1/go.mod h1:4IpnpJFwr1mo/6HL8XIPJaE9y0+u1KcVmuW7dwFSVrI= go.opentelemetry.io/proto/otlp v0.7.0/go.mod 
h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= +go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= -golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.21.0 
h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= -golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod 
h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -546,6 +1268,8 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -554,7 +1278,11 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= @@ -562,8 +1290,9 @@ golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -593,34 +1322,79 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod 
h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod 
h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= +golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -630,11 +1404,17 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -650,11 +1430,11 @@ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -667,21 +1447,54 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -690,9 +1503,13 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -704,16 +1521,20 @@ golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= @@ -721,17 +1542,23 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod 
h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -744,6 +1571,7 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -772,23 +1600,50 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools 
v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= -golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= 
+gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -805,14 +1660,60 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod 
h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjYK+5E= +google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= +google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= +google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -843,13 +1744,126 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= -google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= 
-google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw= -google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod 
h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod 
h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto v0.0.0-20240221002015-b0ce06bbee7c h1:Zmyn5CV/jxzKnF+3d+xzbomACPwLQqVpLTpyXN5uTaQ= +google.golang.org/genproto v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c h1:9g7erC9qu44ks7UK4gDNlnk4kOxZG707xKm4jVniy6o= +google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod 
h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c h1:NUsgEN92SQQqzfA+YtqYNqYmB3DMMYLlIwUZAQFVFbo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -862,14 +1876,38 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.58.0 h1:32JY8YpPMSR45K+c3o6b8VL73V+rR8k+DeMIr4vRH8o= -google.golang.org/grpc v1.58.0/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod 
h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= +google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -884,14 +1922,19 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/dnaeon/go-vcr.v3 v3.2.0 h1:Rltp0Vf+Aq0u4rQXgmXgtgoRDStTnFN83cWgSGSoRzM= +gopkg.in/dnaeon/go-vcr.v3 v3.2.0/go.mod h1:2IMOnnlx9I6u9x+YBsM3tAMx6AlOxnJ0pWxQAzZ79Ag= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -908,7 +1951,6 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -918,65 +1960,106 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.28.1 h1:i+0O8k2NPBCPYaMB+uCkseEbawEt/eFaiRqUx8aB108= -k8s.io/api v0.28.1/go.mod h1:uBYwID+66wiL28Kn2tBjBYQdEU0Xk0z5qF8bIBqk/Dg= -k8s.io/apiextensions-apiserver v0.28.1 h1:l2ThkBRjrWpw4f24uq0Da2HaEgqJZ7pcgiEUTKSmQZw= -k8s.io/apiextensions-apiserver v0.28.1/go.mod h1:sVvrI+P4vxh2YBBcm8n2ThjNyzU4BQGilCQ/JAY5kGs= -k8s.io/apimachinery v0.28.1 h1:EJD40og3GizBSV3mkIoXQBsws32okPOy+MkRyzh6nPY= -k8s.io/apimachinery v0.28.1/go.mod h1:X0xh/chESs2hP9koe+SdIAcXWcQ+RM5hy0ZynB+yEvw= -k8s.io/apiserver v0.28.1 h1:dw2/NKauDZCnOUAzIo2hFhtBRUo6gQK832NV8kuDbGM= -k8s.io/apiserver v0.28.1/go.mod h1:d8aizlSRB6yRgJ6PKfDkdwCy2DXt/d1FDR6iJN9kY1w= -k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= -k8s.io/client-go v0.28.1 h1:pRhMzB8HyLfVwpngWKE8hDcXRqifh1ga2Z/PU9SXVK8= -k8s.io/client-go v0.28.1/go.mod h1:pEZA3FqOsVkCc07pFVzK076R+P/eXqsgx5zuuRWukNE= -k8s.io/cloud-provider v0.28.1 h1:bR7lIRYBHqxfsOkUsY2hJ7V7vmStxb0wjJJdrID8+7I= -k8s.io/cloud-provider v0.28.1/go.mod h1:7jxsc3c15go606KLXnUq8Cy4nX1R1dxFRgn/czIJp/Q= -k8s.io/code-generator v0.28.1/go.mod h1:ueeSJZJ61NHBa0ccWLey6mwawum25vX61nRZ6WOzN9A= -k8s.io/component-base v0.28.1 h1:LA4AujMlK2mr0tZbQDZkjWbdhTV5bRyEyAFe0TJxlWg= -k8s.io/component-base v0.28.1/go.mod h1:jI11OyhbX21Qtbav7JkhehyBsIRfnO8oEgoAR12ArIU= -k8s.io/component-helpers v0.28.1 h1:ts/vykhyUmPLhUl/hdLdf+a4BWA0giQ3f25HAIhl+RI= -k8s.io/component-helpers v0.28.1/go.mod h1:rHFPj33uXNbgppg+ilmjJ4oR73prZQNRRmg+utVOAb0= -k8s.io/controller-manager v0.28.1 h1:+md/3DAsdLVoMe3AewhyTxljnPLE/gyshTDZ8sX4Rf0= -k8s.io/controller-manager v0.28.1/go.mod h1:yZ8aOBpMYOBTAI/Jd0qpaUzZUlQigmtRcdYg2VgWKiU= -k8s.io/csi-translation-lib v0.28.1 h1:6EdpqKbwgJEcLxRzcGR1GnbyJrcTcUMhHTYfMwFT3LA= -k8s.io/csi-translation-lib v0.28.1/go.mod h1:CyBNplESJxGg+Dkp+Wn0P0LRRgeLpBihu3vJ7eHtU2E= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= +k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= +k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0= +k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= +k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= +k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= +k8s.io/apiserver v0.29.0 h1:Y1xEMjJkP+BIi0GSEv1BBrf1jLU9UPfAnnGGbbDdp7o= +k8s.io/apiserver v0.29.0/go.mod h1:31n78PsRKPmfpee7/l9NYEv67u6hOL6AfcE761HapDM= +k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= +k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= +k8s.io/cloud-provider v0.29.0 
h1:Qgk/jHsSKGRk/ltTlN6e7eaNuuamLROOzVBd0RPp94M= +k8s.io/cloud-provider v0.29.0/go.mod h1:gBCt7YYKFV4oUcJ/0xF9lS/9il4MxKunJ+ZKvh39WGo= +k8s.io/code-generator v0.29.0/go.mod h1:5bqIZoCxs2zTRKMWNYqyQWW/bajc+ah4rh0tMY8zdGA= +k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s= +k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M= +k8s.io/component-helpers v0.29.0 h1:Y8W70NGeitKxWwhsPo/vEQbQx5VqJV+3xfLpP3V1VxU= +k8s.io/component-helpers v0.29.0/go.mod h1:j2coxVfmzTOXWSE6sta0MTgNSr572Dcx68F6DD+8fWc= +k8s.io/controller-manager v0.29.0 h1:kEv9sKLnjDkoSqeouWp2lZ8P33an5wrDJpOMqoyD7pc= +k8s.io/controller-manager v0.29.0/go.mod h1:UKtadWkULF5bfX7vu3hHppzY/hz88C03t70GItg/x08= +k8s.io/csi-translation-lib v0.29.0 h1:we4X1yUlDikvm5Rv0dwMuPHNw6KwjwsQiAuOPWXha8M= +k8s.io/csi-translation-lib v0.29.0/go.mod h1:Cp6t3CNBSm1dXS17V8IImUjkqfIB6KCj8Fs8wf6uyTA= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kms v0.28.1 h1:QLNTIc0k7Yebkt9yobj9Y9qBoRCMB4dq+pFCxVXVBnY= -k8s.io/kms v0.28.1/go.mod h1:I2TwA8oerDRInHWWBOqSUzv1EJDC1+55FQKYkxaPxh0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= -k8s.io/kubectl v0.28.1 h1:jAq4yKEqQL+fwkWcEsUWxhJ7uIRcOYQraJxx4SyAMTY= -k8s.io/kubectl v0.28.1/go.mod h1:a0nk/lMMeKBulp0lMTJAKbkjZg1ykqfLfz/d6dnv1ak= -k8s.io/kubelet v0.28.1 h1:QRfx+jrzNgkLnMSw/nxGkAN7cjHPO446MDbjPITxLkk= -k8s.io/kubelet v0.28.1/go.mod h1:xYBbbJ0e2Rtb/hv+QFie448lFF81J990ImIptce2AHk= -k8s.io/kubernetes v1.28.1 h1:ZQuukGbpVjSbMypkjNErpbsSHni6RPgoqz+2zDBsuMY= -k8s.io/kubernetes v1.28.1/go.mod h1:rBQpjGYlLBV0KuOLw8EG45N5EBCskWiPpi0xy5liHMI= -k8s.io/mount-utils v0.28.1 h1:oyPtn8ZVxniBfwSlQaBF4fr7QVNYzUuk+gkuxEJgil0= -k8s.io/mount-utils v0.28.1/go.mod h1:AyP8LmZSLgpGdFQr+vzHTerlPiGvXUdP99n98Er47jw= -k8s.io/pod-security-admission v0.28.1 h1:d3jvo/+C6yDR1wnlX9ot1WvLyJ5R4uachJyxhdn9cW8= -k8s.io/pod-security-admission v0.28.1/go.mod h1:Qm1rSy3l96m6QXGNU/8u+cmdpNdmAeA3OYDinrXhi6U= -k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kms v0.29.3 h1:ReljsAUhYlm2spdT4yXmY+9a8x8dc/OT4mXvwQPPteQ= +k8s.io/kms v0.29.3/go.mod h1:TBGbJKpRUMk59neTMDMddjIDL+D4HuFUbpuiuzmOPg0= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kube-openapi v0.0.0-20240220201932-37d671a357a5 h1:QSpdNrZ9uRlV0VkqLvVO0Rqg8ioKi3oSw7O5P7pJV8M= +k8s.io/kube-openapi v0.0.0-20240220201932-37d671a357a5/go.mod h1:Pa1PvrP7ACSkuX6I7KYomY6cmMA0Tx86waBhDUgoKPw= +k8s.io/kubectl v0.29.0 h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI= +k8s.io/kubectl v0.29.0/go.mod h1:0jMjGWIcMIQzmUaMgAzhSELv5WtHo2a8pq67DtviAJs= +k8s.io/kubelet v0.29.0 
h1:SX5hlznTBcGIrS1scaf8r8p6m3e475KMifwt9i12iOk= +k8s.io/kubelet v0.29.0/go.mod h1:kvKS2+Bz2tgDOG1S1q0TH2z1DasNuVF+8p6Aw7xvKkI= +k8s.io/kubernetes v1.29.2 h1:8hh1cntqdulanjQt7wSSSsJfBgOyx6fUdFWslvGL5m0= +k8s.io/kubernetes v1.29.2/go.mod h1:xZPKU0yO0CBbLTnbd+XGyRmmtmaVuJykDb8gNCkeeUE= +k8s.io/mount-utils v0.29.0 h1:KcUE0bFHONQC10V3SuLWQ6+l8nmJggw9lKLpDftIshI= +k8s.io/mount-utils v0.29.0/go.mod h1:N3lDK/G1B8R/IkAt4NhHyqB07OqEr7P763z3TNge94U= +k8s.io/pod-security-admission v0.29.0 h1:tY/ldtkbBCulMYVSWg6ZDLlgDYDWy6rLj8e/AgmwSj4= +k8s.io/pod-security-admission v0.29.0/go.mod h1:bGIeKCzU0Q0Nl185NHmqcMCiOjTcqTrBfAQaeupwq0E= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= 
+modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= -sigs.k8s.io/cloud-provider-azure v1.27.1-0.20230825065334-8b1cf948b7ed h1:tT1CT8ff+GPVYtcPxeopAnUbyapzZ0MaCFz2vgAp14U= -sigs.k8s.io/cloud-provider-azure v1.27.1-0.20230825065334-8b1cf948b7ed/go.mod h1:T86YMaSDRFlMqX5Kmb+KqeASg4Px75GQfcs0sD0yqAw= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= +sigs.k8s.io/cloud-provider-azure v1.29.1-0.20240401064333-15fadde40d38 h1:vO1Hm+bkN5+rQBxupkSyPT9vRSzKsF2Dde2/wIpKWHc= +sigs.k8s.io/cloud-provider-azure v1.29.1-0.20240401064333-15fadde40d38/go.mod h1:P9469h4lNzbAy86lAdxstlALSdmCHaTuhJTaVAgdfkI= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.7 h1:2i90v/+RL6UmN9zE8bDxEy8DpEdIK9WiwmrGS6XoDgk= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.7/go.mod h1:4rMNajTN5xEbeVGFWfAeqorkednAmXM5/d8OK+ux+tY= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.2 h1:quGUBBYzHpEXd6PSNUS9UpheEcwW+AbDtZBIAuzS0jM= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.2/go.mod h1:XG0A5DEGP8oKo97GeDAcd2rXOCVX22JeHNLpiDVgfZg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/verify-gomod.sh b/hack/verify-gomod.sh index d45a265df..77f67ef7b 100755 --- a/hack/verify-gomod.sh +++ b/hack/verify-gomod.sh @@ -30,3 +30,5 @@ if [[ -n "${diff}" ]]; then exit 1 fi echo "Done" + +go list -mod readonly -m all diff --git a/pkg/azureconstants/azure_constants.go 
b/pkg/azureconstants/azure_constants.go index 85e3ff8af..2157a47fc 100644 --- a/pkg/azureconstants/azure_constants.go +++ b/pkg/azureconstants/azure_constants.go @@ -39,80 +39,83 @@ import ( ) const ( - AzureDiskCSIDriverName = "azuredisk_csi_driver" - CachingModeField = "cachingmode" - DefaultAzureCredentialFileEnv = "AZURE_CREDENTIAL_FILE" - DefaultCredFilePathLinux = "/etc/kubernetes/azure.json" - DefaultCredFilePathWindows = "C:\\k\\azure.json" - DefaultDriverName = "azuredisk.csi.confidential.cloud" - DesIDField = "diskencryptionsetid" - DiskEncryptionTypeField = "diskencryptiontype" - DiskAccessIDField = "diskaccessid" - DiskIOPSReadWriteField = "diskiopsreadwrite" - DiskMBPSReadWriteField = "diskmbpsreadwrite" - DiskNameField = "diskname" - EnableBurstingField = "enablebursting" - ErrDiskNotFound = "not found" - FsTypeField = "fstype" - IncrementalField = "incremental" - KindField = "kind" - LocationField = "location" - LogicalSectorSizeField = "logicalsectorsize" - LUN = "LUN" - MaxSharesField = "maxshares" - MinimumDiskSizeGiB = 1 - NetworkAccessPolicyField = "networkaccesspolicy" - PublicNetworkAccessField = "publicnetworkaccess" - NotFound = "NotFound" - PerfProfileBasic = "basic" - PerfProfileAdvanced = "advanced" - PerfProfileField = "perfprofile" - PerfProfileNone = "none" - PremiumAccountPrefix = "premium" - PvcNameKey = "csi.storage.k8s.io/pvc/name" - PvcNamespaceKey = "csi.storage.k8s.io/pvc/namespace" - PvcNamespaceTag = "kubernetes.io-created-for-pvc-namespace" - PvcNameTag = "kubernetes.io-created-for-pvc-name" - PvNameKey = "csi.storage.k8s.io/pv/name" - PvNameTag = "kubernetes.io-created-for-pv-name" - RateLimited = "rate limited" - RequestedSizeGib = "requestedsizegib" - ResizeRequired = "resizeRequired" - SubscriptionIDField = "subscriptionid" - ResourceGroupField = "resourcegroup" - DataAccessAuthModeField = "dataaccessauthmode" - ResourceNotFound = "ResourceNotFound" - SkuNameField = "skuname" - SourceDiskSearchMaxDepth = 10 - SourceSnapshot = "snapshot" - SourceVolume = "volume" - StandardSsdAccountPrefix = "standardssd" - StorageAccountTypeField = "storageaccounttype" - TagsField = "tags" - ThrottlingKey = "throttlingKey" - TrueValue = "true" - FalseValue = "false" - UserAgentField = "useragent" - VolumeAttributePartition = "partition" - WellKnownTopologyKey = "topology.kubernetes.io/zone" - InstanceTypeKey = "node.kubernetes.io/instance-type" - WriteAcceleratorEnabled = "writeacceleratorenabled" - ZonedField = "zoned" - EnableAsyncAttachField = "enableasyncattach" - PerformancePlusField = "enableperformanceplus" - AttachDiskInitialDelayField = "attachdiskinitialdelay" - TooManyRequests = "TooManyRequests" - ClientThrottled = "client throttled" - VolumeID = "volumeid" - Node = "node" - SourceResourceID = "source_resource_id" - SnapshotName = "snapshot_name" - SnapshotID = "snapshot_id" - DeviceSettingsKeyPrefix = "device-setting/" - BlockDeviceRootPathLinux = "/sys/block" - DummyBlockDevicePathLinux = "/sys/block/sda" + AzureDiskCSIDriverName = "azuredisk_csi_driver" + CachingModeField = "cachingmode" + DefaultAzureCredentialFileEnv = "AZURE_CREDENTIAL_FILE" + DefaultCredFilePathLinux = "/etc/kubernetes/azure.json" + DefaultCredFilePathWindows = "C:\\k\\azure.json" + DefaultDriverName = "azuredisk.csi.confidential.cloud" + DesIDField = "diskencryptionsetid" + DiskEncryptionTypeField = "diskencryptiontype" + DiskAccessIDField = "diskaccessid" + DiskIOPSReadWriteField = "diskiopsreadwrite" + DiskMBPSReadWriteField = "diskmbpsreadwrite" + DiskNameField = "diskname" 
+ EnableBurstingField = "enablebursting" + ErrDiskNotFound = "not found" + FsTypeField = "fstype" + IncrementalField = "incremental" + KindField = "kind" + LocationField = "location" + LogicalSectorSizeField = "logicalsectorsize" + LUN = "LUN" + MaxSharesField = "maxshares" + MinimumDiskSizeGiB = 1 + NetworkAccessPolicyField = "networkaccesspolicy" + PublicNetworkAccessField = "publicnetworkaccess" + NotFound = "NotFound" + PerfProfileBasic = "basic" + PerfProfileAdvanced = "advanced" + PerfProfileField = "perfprofile" + PerfProfileNone = "none" + PremiumAccountPrefix = "premium" + PvcNameKey = "csi.storage.k8s.io/pvc/name" + PvcNamespaceKey = "csi.storage.k8s.io/pvc/namespace" + PvcNamespaceTag = "kubernetes.io-created-for-pvc-namespace" + PvcNameTag = "kubernetes.io-created-for-pvc-name" + PvNameKey = "csi.storage.k8s.io/pv/name" + PvNameTag = "kubernetes.io-created-for-pv-name" + RateLimited = "rate limited" + RequestedSizeGib = "requestedsizegib" + ResizeRequired = "resizeRequired" + SubscriptionIDField = "subscriptionid" + ResourceGroupField = "resourcegroup" + DataAccessAuthModeField = "dataaccessauthmode" + ResourceNotFound = "ResourceNotFound" + SkuNameField = "skuname" + SourceDiskSearchMaxDepth = 10 + SourceSnapshot = "snapshot" + SourceVolume = "volume" + StandardSsdAccountPrefix = "standardssd" + StorageAccountTypeField = "storageaccounttype" + TagsField = "tags" + GetDiskThrottlingKey = "getdiskthrottlingKey" + CheckDiskLunThrottlingKey = "checkdisklunthrottlingKey" + TrueValue = "true" + FalseValue = "false" + UserAgentField = "useragent" + VolumeAttributePartition = "partition" + WellKnownTopologyKey = "topology.kubernetes.io/zone" + InstanceTypeKey = "node.kubernetes.io/instance-type" + WriteAcceleratorEnabled = "writeacceleratorenabled" + ZonedField = "zoned" + EnableAsyncAttachField = "enableasyncattach" + PerformancePlusField = "enableperformanceplus" + PerformancePlusMinimumDiskSizeGiB = 513 + AttachDiskInitialDelayField = "attachdiskinitialdelay" + TooManyRequests = "TooManyRequests" + ClientThrottled = "client throttled" + VolumeID = "volumeid" + Node = "node" + SourceResourceID = "source_resource_id" + SnapshotName = "snapshot_name" + SnapshotID = "snapshot_id" + DeviceSettingsKeyPrefix = "device-setting/" + BlockDeviceRootPathLinux = "/sys/block" + DummyBlockDevicePathLinux = "/sys/block/sda" // define different sleep time when hit throttling SnapshotOpThrottlingSleepSec = 50 + MaxThrottlingSleepSec = 1200 ) var ( diff --git a/pkg/azuredisk/azure_common_darwin.go b/pkg/azuredisk/azure_common_darwin.go index df34dff9f..09be9dead 100644 --- a/pkg/azuredisk/azure_common_darwin.go +++ b/pkg/azuredisk/azure_common_darwin.go @@ -119,6 +119,6 @@ func rescanAllVolumes(io azureutils.IOHandler) error { return nil } -func GetVolumeStats(ctx context.Context, m *mount.SafeFormatAndMount, target string, hostutil hostUtil) ([]*csi.VolumeUsage, error) { +func (d *DriverCore) GetVolumeStats(ctx context.Context, m *mount.SafeFormatAndMount, volumeID, target string, hostutil hostUtil) ([]*csi.VolumeUsage, error) { return []*csi.VolumeUsage{}, nil } diff --git a/pkg/azuredisk/azure_common_linux.go b/pkg/azuredisk/azure_common_linux.go index 6d832808f..e1711bc64 100644 --- a/pkg/azuredisk/azure_common_linux.go +++ b/pkg/azuredisk/azure_common_linux.go @@ -81,7 +81,7 @@ func getDiskLinkByDevName(io azureutils.IOHandler, devLinkPath, devName string) return "", fmt.Errorf("read %s error: %v", devLinkPath, err) } -func scsiHostRescan(io azureutils.IOHandler, m *mount.SafeFormatAndMount) { 
+func scsiHostRescan(io azureutils.IOHandler, _ *mount.SafeFormatAndMount) { scsiPath := "/sys/class/scsi_host/" if dirs, err := io.ReadDir(scsiPath); err == nil { for _, f := range dirs { @@ -96,7 +96,7 @@ func scsiHostRescan(io azureutils.IOHandler, m *mount.SafeFormatAndMount) { } } -func findDiskByLun(lun int, io azureutils.IOHandler, m *mount.SafeFormatAndMount) (string, error) { +func findDiskByLun(lun int, io azureutils.IOHandler, _ *mount.SafeFormatAndMount) (string, error) { azureDisks := listAzureDiskPath(io) return findDiskByLunWithConstraint(lun, io, azureDisks) } @@ -196,7 +196,7 @@ func findDiskByLunWithConstraint(lun int, io azureutils.IOHandler, azureDisks [] return "", err } -func preparePublishPath(path string, m *mount.SafeFormatAndMount) error { +func preparePublishPath(_ string, _ *mount.SafeFormatAndMount) error { return nil } @@ -269,7 +269,7 @@ func rescanAllVolumes(io azureutils.IOHandler) error { return nil } -func GetVolumeStats(ctx context.Context, m *mount.SafeFormatAndMount, target string, hostutil hostUtil) ([]*csi.VolumeUsage, error) { +func (d *DriverCore) GetVolumeStats(_ context.Context, m *mount.SafeFormatAndMount, _, target string, hostutil hostUtil) ([]*csi.VolumeUsage, error) { var volUsages []*csi.VolumeUsage _, err := os.Stat(target) if err != nil { diff --git a/pkg/azuredisk/azure_common_windows.go b/pkg/azuredisk/azure_common_windows.go index 390912181..ac5089bb6 100644 --- a/pkg/azuredisk/azure_common_windows.go +++ b/pkg/azuredisk/azure_common_windows.go @@ -25,12 +25,14 @@ import ( "strconv" "github.com/container-storage-interface/spec/lib/go/csi" + "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "k8s.io/klog/v2" "k8s.io/mount-utils" "sigs.k8s.io/azuredisk-csi-driver/pkg/azureutils" "sigs.k8s.io/azuredisk-csi-driver/pkg/mounter" + azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" ) func formatAndMount(source, target, fstype string, options []string, m *mount.SafeFormatAndMount) error { @@ -162,9 +164,22 @@ func rescanAllVolumes(io azureutils.IOHandler) error { return nil } -func GetVolumeStats(ctx context.Context, m *mount.SafeFormatAndMount, target string, hostutil hostUtil) ([]*csi.VolumeUsage, error) { +func (d *DriverCore) GetVolumeStats(ctx context.Context, m *mount.SafeFormatAndMount, volumeID, target string, hostutil hostUtil) ([]*csi.VolumeUsage, error) { + // check if the volume stats is cached + cache, err := d.volStatsCache.Get(volumeID, azcache.CacheReadTypeDefault) + if err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + if cache != nil { + volUsage := cache.(csi.VolumeUsage) + klog.V(6).Infof("NodeGetVolumeStats: volume stats for volume %s path %s is cached", volumeID, target) + return []*csi.VolumeUsage{&volUsage}, nil + } + if proxy, ok := m.Interface.(mounter.CSIProxyMounter); ok { volUsage, err := proxy.GetVolumeStats(ctx, target) + // cache the volume stats per volume + d.volStatsCache.Set(volumeID, *volUsage) return []*csi.VolumeUsage{volUsage}, err } return []*csi.VolumeUsage{}, fmt.Errorf("could not cast to csi proxy class") diff --git a/pkg/azuredisk/azure_controller_common.go b/pkg/azuredisk/azure_controller_common.go new file mode 100644 index 000000000..0600c6382 --- /dev/null +++ b/pkg/azuredisk/azure_controller_common.go @@ -0,0 +1,682 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azuredisk + +import ( + "context" + "errors" + "fmt" + "net/http" + "path" + "regexp" + "strings" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + + "k8s.io/apimachinery/pkg/types" + kwait "k8s.io/apimachinery/pkg/util/wait" + cloudprovider "k8s.io/cloud-provider" + volerr "k8s.io/cloud-provider/volume/errors" + "k8s.io/klog/v2" + "k8s.io/utils/pointer" + + "sigs.k8s.io/azuredisk-csi-driver/pkg/azureutils" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient" + azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" + "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/provider" +) + +const ( + // Disk Caching is not supported for disks 4 TiB and larger + // https://docs.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance#disk-caching + diskCachingLimit = 4096 // GiB + + maxLUN = 64 // max number of LUNs per VM + errStatusCode400 = "statuscode=400" + errInvalidParameter = `code="invalidparameter"` + errTargetInstanceIds = `target="instanceids"` + sourceSnapshot = "snapshot" + sourceVolume = "volume" + attachDiskMapKeySuffix = "attachdiskmap" + detachDiskMapKeySuffix = "detachdiskmap" + + // default initial delay in milliseconds for batch disk attach/detach + defaultAttachDetachInitialDelayInMs = 1000 + + // WriteAcceleratorEnabled support for Azure Write Accelerator on Azure Disks + // https://docs.microsoft.com/azure/virtual-machines/windows/how-to-enable-write-accelerator + WriteAcceleratorEnabled = "writeacceleratorenabled" + + // see https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#create-a-managed-disk-by-copying-a-snapshot. + diskSnapshotPath = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/snapshots/%s" + + // see https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#create-a-managed-disk-from-an-existing-managed-disk-in-the-same-or-different-subscription. 
managedDiskPath = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s" +) + +var defaultBackOff = kwait.Backoff{ + Steps: 20, + Duration: 2 * time.Second, + Factor: 1.5, + Jitter: 0.0, +} + +var ( + managedDiskPathRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/disks/(.+)`) + diskSnapshotPathRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/snapshots/(.+)`) +) + +type controllerCommon struct { + diskStateMap sync.Map // <diskURI> -> <attaching/detaching state> + lockMap *lockMap + cloud *provider.Cloud + clientFactory azclient.ClientFactory + // disk queue that is waiting for attach or detach on specific node + // <nodeName, map<diskURI, *AttachDiskOptions/DiskName>> + attachDiskMap sync.Map + detachDiskMap sync.Map + // DisableUpdateCache whether disable update cache in disk attach/detach + DisableUpdateCache bool + // DisableDiskLunCheck whether disable disk lun check after disk attach/detach + DisableDiskLunCheck bool + // AttachDetachInitialDelayInMs determines initial delay in milliseconds for batch disk attach/detach + AttachDetachInitialDelayInMs int + ForceDetachBackoff bool +} + +// ExtendedLocation contains additional info about the location of resources. +type ExtendedLocation struct { + // Name - The name of the extended location. + Name string `json:"name,omitempty"` + // Type - The type of the extended location. + Type string `json:"type,omitempty"` +} + +// AttachDisk attaches a disk to vm +// occupiedLuns is used to avoid conflict with other disk attach in k8s VolumeAttachments +// return (lun, error) +func (c *controllerCommon) AttachDisk(ctx context.Context, diskName, diskURI string, nodeName types.NodeName, + cachingMode armcompute.CachingTypes, disk *armcompute.Disk, occupiedLuns []int) (int32, error) { + diskEncryptionSetID := "" + writeAcceleratorEnabled := false + + // there is possibility that disk is nil when GetDisk is throttled + // don't check disk state when GetDisk is throttled + if disk != nil { + if disk.ManagedBy != nil && (disk.Properties == nil || disk.Properties.MaxShares == nil || *disk.Properties.MaxShares <= 1) { + vmset, err := c.cloud.GetNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) + if err != nil { + return -1, err + } + attachedNode, err := vmset.GetNodeNameByProviderID(*disk.ManagedBy) + if err != nil { + return -1, err + } + if strings.EqualFold(string(nodeName), string(attachedNode)) { + klog.Warningf("volume %s is actually attached to current node %s, invalidate vm cache and return error", diskURI, nodeName) + // update VM(invalidate vm cache) + if errUpdate := c.UpdateVM(ctx, nodeName); errUpdate != nil { + return -1, errUpdate + } + lun, _, err := c.GetDiskLun(diskName, diskURI, nodeName) + return lun, err + } + + attachErr := fmt.Sprintf( + "disk(%s) already attached to node(%s), could not be attached to node(%s)", + diskURI, *disk.ManagedBy, nodeName) + klog.V(2).Infof("found dangling volume %s attached to node %s, could not be attached to node(%s)", diskURI, attachedNode, nodeName) + return -1, volerr.NewDanglingError(attachErr, attachedNode, "") + } + + if disk.Properties != nil { + if disk.Properties.DiskSizeGB != nil && *disk.Properties.DiskSizeGB >= diskCachingLimit && cachingMode != armcompute.CachingTypesNone { + // Disk Caching is not supported for disks 4 TiB and larger + // https://docs.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance#disk-caching + cachingMode = armcompute.CachingTypesNone + klog.Warningf("size of disk(%s) is %dGB which is bigger than limit(%dGB), set
cacheMode as None", + diskURI, *disk.Properties.DiskSizeGB, diskCachingLimit) + } + + if disk.Properties.Encryption != nil && + disk.Properties.Encryption.DiskEncryptionSetID != nil { + diskEncryptionSetID = *disk.Properties.Encryption.DiskEncryptionSetID + } + + if disk.Properties.DiskState != nil && *disk.Properties.DiskState != armcompute.DiskStateUnattached && (disk.Properties.MaxShares == nil || *disk.Properties.MaxShares <= 1) { + return -1, fmt.Errorf("state of disk(%s) is %s, not in expected %s state", diskURI, *disk.Properties.DiskState, armcompute.DiskStateUnattached) + } + } + + if v, ok := disk.Tags[WriteAcceleratorEnabled]; ok { + if v != nil && strings.EqualFold(*v, "true") { + writeAcceleratorEnabled = true + } + } + } + + options := provider.AttachDiskOptions{ + Lun: -1, + DiskName: diskName, + CachingMode: compute.CachingTypes(cachingMode), + DiskEncryptionSetID: diskEncryptionSetID, + WriteAcceleratorEnabled: writeAcceleratorEnabled, + } + node := strings.ToLower(string(nodeName)) + diskuri := strings.ToLower(diskURI) + requestNum, err := c.insertAttachDiskRequest(diskuri, node, &options) + if err != nil { + return -1, err + } + + c.lockMap.LockEntry(node) + unlock := false + defer func() { + if !unlock { + c.lockMap.UnlockEntry(node) + } + }() + + if c.AttachDetachInitialDelayInMs > 0 && requestNum == 1 { + klog.V(2).Infof("wait %dms for more requests on node %s, current disk attach: %s", c.AttachDetachInitialDelayInMs, node, diskURI) + time.Sleep(time.Duration(c.AttachDetachInitialDelayInMs) * time.Millisecond) + } + + diskMap, err := c.cleanAttachDiskRequests(node) + if err != nil { + return -1, err + } + + lun, err := c.SetDiskLun(nodeName, diskuri, diskMap, occupiedLuns) + if err != nil { + return -1, err + } + + klog.V(2).Infof("Trying to attach volume %s lun %d to node %s, diskMap len:%d, %+v", diskURI, lun, nodeName, len(diskMap), diskMap) + if len(diskMap) == 0 { + if !c.DisableDiskLunCheck { + // always check disk lun after disk attach complete + diskLun, vmState, errGetLun := c.GetDiskLun(diskName, diskURI, nodeName) + if errGetLun != nil { + return -1, fmt.Errorf("disk(%s) could not be found on node(%s), vmState: %s, error: %w", diskURI, nodeName, pointer.StringDeref(vmState, ""), errGetLun) + } + lun = diskLun + } + return lun, nil + } + + vmset, err := c.cloud.GetNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) + if err != nil { + return -1, err + } + c.diskStateMap.Store(disk, "attaching") + defer c.diskStateMap.Delete(disk) + + defer func() { + // invalidate the cache if there is error in disk attach + if err != nil { + _ = vmset.DeleteCacheForNode(string(nodeName)) + } + }() + + err = vmset.AttachDisk(ctx, nodeName, diskMap) + if err != nil { + if IsOperationPreempted(err) { + klog.Errorf("Retry VM Update on node (%s) due to error (%v)", nodeName, err) + err = vmset.UpdateVM(ctx, nodeName) + } + if err != nil { + return -1, err + } + } + + if !c.DisableDiskLunCheck { + // always check disk lun after disk attach complete + diskLun, vmState, errGetLun := c.GetDiskLun(diskName, diskURI, nodeName) + if errGetLun != nil { + return -1, fmt.Errorf("disk(%s) could not be found on node(%s), vmState: %s, error: %w", diskURI, nodeName, pointer.StringDeref(vmState, ""), errGetLun) + } + lun = diskLun + } + return lun, nil +} + +// insertAttachDiskRequest return (attachDiskRequestQueueLength, error) +func (c *controllerCommon) insertAttachDiskRequest(diskURI, nodeName string, options *provider.AttachDiskOptions) (int, error) { + var diskMap 
map[string]*provider.AttachDiskOptions + attachDiskMapKey := nodeName + attachDiskMapKeySuffix + c.lockMap.LockEntry(attachDiskMapKey) + defer c.lockMap.UnlockEntry(attachDiskMapKey) + v, ok := c.attachDiskMap.Load(nodeName) + if ok { + if diskMap, ok = v.(map[string]*provider.AttachDiskOptions); !ok { + return -1, fmt.Errorf("convert attachDiskMap failure on node(%s)", nodeName) + } + } else { + diskMap = make(map[string]*provider.AttachDiskOptions) + c.attachDiskMap.Store(nodeName, diskMap) + } + // insert attach disk request to queue + _, ok = diskMap[diskURI] + if ok { + klog.V(2).Infof("azureDisk - duplicated attach disk(%s) request on node(%s)", diskURI, nodeName) + } else { + diskMap[diskURI] = options + } + return len(diskMap), nil +} + +// clean up attach disk requests +// return original attach disk requests +func (c *controllerCommon) cleanAttachDiskRequests(nodeName string) (map[string]*provider.AttachDiskOptions, error) { + var diskMap map[string]*provider.AttachDiskOptions + + attachDiskMapKey := nodeName + attachDiskMapKeySuffix + c.lockMap.LockEntry(attachDiskMapKey) + defer c.lockMap.UnlockEntry(attachDiskMapKey) + v, ok := c.attachDiskMap.Load(nodeName) + if !ok { + return diskMap, nil + } + if diskMap, ok = v.(map[string]*provider.AttachDiskOptions); !ok { + return diskMap, fmt.Errorf("convert attachDiskMap failure on node(%s)", nodeName) + } + c.attachDiskMap.Store(nodeName, make(map[string]*provider.AttachDiskOptions)) + return diskMap, nil +} + +// DetachDisk detaches a disk from VM +func (c *controllerCommon) DetachDisk(ctx context.Context, diskName, diskURI string, nodeName types.NodeName) error { + if _, err := c.cloud.InstanceID(ctx, nodeName); err != nil { + if errors.Is(err, cloudprovider.InstanceNotFound) { + // if host doesn't exist, no need to detach + klog.Warningf("azureDisk - failed to get azure instance id(%s), DetachDisk(%s) will assume disk is already detached", + nodeName, diskURI) + return nil + } + klog.Warningf("failed to get azure instance id (%v)", err) + return fmt.Errorf("failed to get azure instance id for node %q: %w", nodeName, err) + } + + vmset, err := c.cloud.GetNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) + if err != nil { + return err + } + + node := strings.ToLower(string(nodeName)) + disk := strings.ToLower(diskURI) + requestNum, err := c.insertDetachDiskRequest(diskName, disk, node) + if err != nil { + return err + } + + c.lockMap.LockEntry(node) + defer c.lockMap.UnlockEntry(node) + + if c.AttachDetachInitialDelayInMs > 0 && requestNum == 1 { + klog.V(2).Infof("wait %dms for more requests on node %s, current disk detach: %s", c.AttachDetachInitialDelayInMs, node, diskURI) + time.Sleep(time.Duration(c.AttachDetachInitialDelayInMs) * time.Millisecond) + } + diskMap, err := c.cleanDetachDiskRequests(node) + if err != nil { + return err + } + + klog.V(2).Infof("Trying to detach volume %s from node %s, diskMap len:%d, %s", diskURI, nodeName, len(diskMap), diskMap) + if len(diskMap) > 0 { + c.diskStateMap.Store(disk, "detaching") + defer c.diskStateMap.Delete(disk) + if err = vmset.DetachDisk(ctx, nodeName, diskMap, false); err != nil { + if isInstanceNotFoundError(err) { + // if host doesn't exist, no need to detach + klog.Warningf("azureDisk - got InstanceNotFoundError(%v), DetachDisk(%s) will assume disk is already detached", + err, diskURI) + return nil + } + if c.ForceDetachBackoff && !azureutils.IsThrottlingError(err) { + klog.Errorf("azureDisk - DetachDisk(%s) from node %s failed with error: %v, retry with force detach", 
diskURI, nodeName, err) + err = vmset.DetachDisk(ctx, nodeName, diskMap, true) + } + } + } + + if err != nil { + klog.Errorf("azureDisk - detach disk(%s, %s) failed, err: %v", diskName, diskURI, err) + return err + } + + if !c.DisableDiskLunCheck { + // always check disk lun after disk detach complete + lun, vmState, errGetLun := c.GetDiskLun(diskName, diskURI, nodeName) + if errGetLun == nil || !strings.Contains(errGetLun.Error(), consts.CannotFindDiskLUN) { + return fmt.Errorf("disk(%s) is still attached to node(%s) on lun(%d), vmState: %s, error: %w", diskURI, nodeName, lun, pointer.StringDeref(vmState, ""), errGetLun) + } + } + + klog.V(2).Infof("azureDisk - detach disk(%s, %s) succeeded", diskName, diskURI) + return nil +} + +// UpdateVM updates a vm +func (c *controllerCommon) UpdateVM(ctx context.Context, nodeName types.NodeName) error { + vmset, err := c.cloud.GetNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) + if err != nil { + return err + } + node := strings.ToLower(string(nodeName)) + c.lockMap.LockEntry(node) + defer c.lockMap.UnlockEntry(node) + + defer func() { + _ = vmset.DeleteCacheForNode(string(nodeName)) + }() + + klog.V(2).Infof("azureDisk - update: vm(%s)", nodeName) + return vmset.UpdateVM(ctx, nodeName) +} + +// insertDetachDiskRequest return (detachDiskRequestQueueLength, error) +func (c *controllerCommon) insertDetachDiskRequest(diskName, diskURI, nodeName string) (int, error) { + var diskMap map[string]string + detachDiskMapKey := nodeName + detachDiskMapKeySuffix + c.lockMap.LockEntry(detachDiskMapKey) + defer c.lockMap.UnlockEntry(detachDiskMapKey) + v, ok := c.detachDiskMap.Load(nodeName) + if ok { + if diskMap, ok = v.(map[string]string); !ok { + return -1, fmt.Errorf("convert detachDiskMap failure on node(%s)", nodeName) + } + } else { + diskMap = make(map[string]string) + c.detachDiskMap.Store(nodeName, diskMap) + } + // insert detach disk request to queue + _, ok = diskMap[diskURI] + if ok { + klog.V(2).Infof("azureDisk - duplicated detach disk(%s) request on node(%s)", diskURI, nodeName) + } else { + diskMap[diskURI] = diskName + } + return len(diskMap), nil +} + +// clean up detach disk requests +// return original detach disk requests +func (c *controllerCommon) cleanDetachDiskRequests(nodeName string) (map[string]string, error) { + var diskMap map[string]string + + detachDiskMapKey := nodeName + detachDiskMapKeySuffix + c.lockMap.LockEntry(detachDiskMapKey) + defer c.lockMap.UnlockEntry(detachDiskMapKey) + v, ok := c.detachDiskMap.Load(nodeName) + if !ok { + return diskMap, nil + } + if diskMap, ok = v.(map[string]string); !ok { + return diskMap, fmt.Errorf("convert detachDiskMap failure on node(%s)", nodeName) + } + // clean up original requests in disk map + c.detachDiskMap.Store(nodeName, make(map[string]string)) + return diskMap, nil +} + +// GetNodeDataDisks invokes vmSet interfaces to get data disks for the node. +func (c *controllerCommon) GetNodeDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]*armcompute.DataDisk, *string, error) { + vmset, err := c.cloud.GetNodeVMSet(nodeName, crt) + if err != nil { + return nil, nil, err + } + + return vmset.GetDataDisks(nodeName, crt) +} + +// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI. 
+func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, *string, error) { + // GetNodeDataDisks need to fetch the cached data/fresh data if cache expired here + // to ensure we get LUN based on latest entry. + disks, provisioningState, err := c.GetNodeDataDisks(nodeName, azcache.CacheReadTypeDefault) + if err != nil { + klog.Errorf("error of getting data disks for node %s: %v", nodeName, err) + return -1, provisioningState, err + } + + for _, disk := range disks { + if disk.Lun != nil && (disk.Name != nil && diskName != "" && strings.EqualFold(*disk.Name, diskName)) || + (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && strings.EqualFold(*disk.Vhd.URI, diskURI)) || + (disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) { + if disk.ToBeDetached != nil && *disk.ToBeDetached { + klog.Warningf("azureDisk - find disk(ToBeDetached): lun %d name %s uri %s", *disk.Lun, diskName, diskURI) + } else { + // found the disk + klog.V(2).Infof("azureDisk - find disk: lun %d name %s uri %s", *disk.Lun, diskName, diskURI) + return *disk.Lun, provisioningState, nil + } + } + } + return -1, provisioningState, fmt.Errorf("%s for disk %s", consts.CannotFindDiskLUN, diskName) +} + +// SetDiskLun find unused luns and allocate lun for every disk in diskMap. +// occupiedLuns is used to avoid conflict with other disk attach in k8s VolumeAttachments +// Return lun of diskURI, -1 if all luns are used. +func (c *controllerCommon) SetDiskLun(nodeName types.NodeName, diskURI string, diskMap map[string]*provider.AttachDiskOptions, occupiedLuns []int) (int32, error) { + disks, _, err := c.GetNodeDataDisks(nodeName, azcache.CacheReadTypeDefault) + if err != nil { + klog.Errorf("error of getting data disks for node %s: %v", nodeName, err) + return -1, err + } + + lun := int32(-1) + _, isDiskInMap := diskMap[diskURI] + used := make([]bool, maxLUN) + for _, disk := range disks { + if disk.Lun != nil { + used[*disk.Lun] = true + if !isDiskInMap { + // find lun of diskURI since diskURI is not in diskMap + if disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI) { + lun = *disk.Lun + } + } + } + } + if !isDiskInMap && lun < 0 { + return -1, fmt.Errorf("could not find disk(%s) in current disk list(len: %d) nor in diskMap(%v)", diskURI, len(disks), diskMap) + } + if len(diskMap) == 0 { + // attach disk request is empty, return directly + return lun, nil + } + + for _, lun := range occupiedLuns { + if lun >= 0 && lun <= maxLUN { + used[int32(lun)] = true + } + } + + // allocate lun for every disk in diskMap + var diskLuns []int32 + count := 0 + for k, v := range used { + if !v { + diskLuns = append(diskLuns, int32(k)) + count++ + if count >= len(diskMap) { + break + } + } + } + + if len(diskLuns) != len(diskMap) { + return -1, fmt.Errorf("could not find enough disk luns(current: %d) for diskMap(%v, len=%d), diskURI(%s)", + len(diskLuns), diskMap, len(diskMap), diskURI) + } + + count = 0 + for uri, opt := range diskMap { + if opt == nil { + return -1, fmt.Errorf("unexpected nil pointer in diskMap(%v), diskURI(%s)", diskMap, diskURI) + } + if strings.EqualFold(uri, diskURI) { + lun = diskLuns[count] + } + opt.Lun = diskLuns[count] + count++ + } + if lun < 0 { + return lun, fmt.Errorf("could not find lun of diskURI(%s), diskMap(%v)", diskURI, diskMap) + } + return lun, nil +} + +// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName. 
+func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) { + attached := make(map[string]bool) + for _, diskName := range diskNames { + attached[diskName] = false + } + + // doing stalled read for GetNodeDataDisks to ensure we don't call ARM + // for every reconcile call. The cache is invalidated after Attach/Detach + // disk. So the new entry will be fetched and cached the first time reconcile + // loop runs after the Attach/Disk OP which will reflect the latest model. + disks, _, err := c.GetNodeDataDisks(nodeName, azcache.CacheReadTypeUnsafe) + if err != nil { + if errors.Is(err, cloudprovider.InstanceNotFound) { + // if host doesn't exist, no need to detach + klog.Warningf("azureDisk - Cannot find node %s, DisksAreAttached will assume disks %v are not attached to it.", + nodeName, diskNames) + return attached, nil + } + + return attached, err + } + + for _, disk := range disks { + for _, diskName := range diskNames { + if disk.Name != nil && diskName != "" && strings.EqualFold(*disk.Name, diskName) { + attached[diskName] = true + } + } + } + + return attached, nil +} + +func (c *controllerCommon) filterNonExistingDisks(ctx context.Context, unfilteredDisks []*armcompute.DataDisk) []*armcompute.DataDisk { + filteredDisks := []*armcompute.DataDisk{} + for _, disk := range unfilteredDisks { + filter := false + if disk.ManagedDisk != nil && disk.ManagedDisk.ID != nil { + diskURI := *disk.ManagedDisk.ID + exist, err := c.checkDiskExists(ctx, diskURI) + if err != nil { + klog.Errorf("checkDiskExists(%s) failed with error: %v", diskURI, err) + } else { + // only filter disk when checkDiskExists returns + filter = !exist + if filter { + klog.Errorf("disk(%s) does not exist, removed from data disk list", diskURI) + } + } + } + + if !filter { + filteredDisks = append(filteredDisks, disk) + } + } + return filteredDisks +} + +func (c *controllerCommon) checkDiskExists(ctx context.Context, diskURI string) (bool, error) { + diskName := path.Base(diskURI) + resourceGroup, subsID, err := getInfoFromDiskURI(diskURI) + if err != nil { + return false, err + } + + diskClient, err := c.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return false, err + } + if _, err := diskClient.Get(ctx, resourceGroup, diskName); err != nil { + var respErr = &azcore.ResponseError{} + if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound { + return false, nil + } + return false, err + } + return true, nil +} + +func IsOperationPreempted(err error) bool { + return strings.Contains(err.Error(), consts.OperationPreemptedErrorCode) +} + +func getValidCreationData(subscriptionID, resourceGroup string, options *ManagedDiskOptions) (armcompute.CreationData, error) { + if options.SourceResourceID == "" { + return armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), + PerformancePlus: options.PerformancePlus, + }, nil + } + + sourceResourceID := options.SourceResourceID + switch options.SourceType { + case sourceSnapshot: + if match := diskSnapshotPathRE.FindString(sourceResourceID); match == "" { + sourceResourceID = fmt.Sprintf(diskSnapshotPath, subscriptionID, resourceGroup, sourceResourceID) + } + + case sourceVolume: + if match := managedDiskPathRE.FindString(sourceResourceID); match == "" { + sourceResourceID = fmt.Sprintf(managedDiskPath, subscriptionID, resourceGroup, sourceResourceID) + } + default: + return armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), + 
PerformancePlus: options.PerformancePlus, + }, nil + } + + splits := strings.Split(sourceResourceID, "/") + if len(splits) > 9 { + if options.SourceType == sourceSnapshot { + return armcompute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, diskSnapshotPathRE) + } + return armcompute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, managedDiskPathRE) + } + return armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), + SourceResourceID: &sourceResourceID, + PerformancePlus: options.PerformancePlus, + }, nil +} + +func isInstanceNotFoundError(err error) bool { + errMsg := strings.ToLower(err.Error()) + if strings.Contains(errMsg, strings.ToLower(consts.VmssVMNotActiveErrorMessage)) { + return true + } + return strings.Contains(errMsg, errStatusCode400) && strings.Contains(errMsg, errInvalidParameter) && strings.Contains(errMsg, errTargetInstanceIds) +} diff --git a/pkg/azuredisk/azure_controller_common_test.go b/pkg/azuredisk/azure_controller_common_test.go new file mode 100644 index 000000000..9e75240c2 --- /dev/null +++ b/pkg/azuredisk/azure_controller_common_test.go @@ -0,0 +1,1036 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azuredisk + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "testing" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/go-autorest/autorest/azure" + autorestmocks "github.com/Azure/go-autorest/autorest/mocks" + "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/utils/pointer" + + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/mock_diskclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient" + "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/provider" + "sigs.k8s.io/cloud-provider-azure/pkg/retry" +) + +var ( + conflictingUserInputError = retry.NewError(false, errors.New(`Code="ConflictingUserInput" Message="Cannot attach the disk pvc-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxx to VM /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxx/resourceGroups/test-rg/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool0-00000000-vmss/virtualMachines/aks-nodepool0-00000000-vmss_0 because it is already attached to VM /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxx/resourceGroups/test-rg/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool0-00000000-vmss/virtualMachines/aks-nodepool0-00000000-vmss_1. 
A disk can be attached to only one VM at a time."`)) +) + +func fakeUpdateAsync(statusCode int) func(context.Context, string, string, compute.VirtualMachineUpdate, string) (*azure.Future, *retry.Error) { + return func(ctx context.Context, resourceGroup, nodeName string, parameters compute.VirtualMachineUpdate, source string) (*azure.Future, *retry.Error) { + vm := &compute.VirtualMachine{ + Name: &nodeName, + Plan: parameters.Plan, + VirtualMachineProperties: parameters.VirtualMachineProperties, + Identity: parameters.Identity, + Zones: parameters.Zones, + Tags: parameters.Tags, + } + s, err := json.Marshal(vm) + if err != nil { + return nil, retry.NewError(false, err) + } + + body := autorestmocks.NewBodyWithBytes(s) + + r := autorestmocks.NewResponseWithBodyAndStatus(body, statusCode, strconv.Itoa(statusCode)) //nolint: bodyclose + r.Request.Method = http.MethodPut + + f, err := azure.NewFutureFromResponse(r) + if err != nil { + return nil, retry.NewError(false, err) + } + + return &f, nil + } +} + +func TestCommonAttachDisk(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + initVM := func(testCloud *provider.Cloud, expectedVMs []compute.VirtualMachine) { + mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface) + for _, vm := range expectedVMs { + mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes() + } + if len(expectedVMs) == 0 { + mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes() + } + } + + defaultSetup := func(testCloud *provider.Cloud, expectedVMs []compute.VirtualMachine, statusCode int, result *retry.Error) { + initVM(testCloud, expectedVMs) + mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface) + mockVMsClient.EXPECT().UpdateAsync(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(fakeUpdateAsync(statusCode)).MaxTimes(1) + mockVMsClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).MaxTimes(1) + mockVMsClient.EXPECT().WaitForUpdateResult(gomock.Any(), gomock.Any(), testCloud.ResourceGroup, gomock.Any()).Return(nil, result).MaxTimes(1) + } + + maxShare := int32(1) + goodInstanceID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/%s", "vm1") + diskEncryptionSetID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/diskEncryptionSets/%s", "diskEncryptionSet-name") + testTags := make(map[string]*string) + testTags[WriteAcceleratorEnabled] = pointer.String("true") + testCases := []struct { + desc string + diskName string + existedDisk *armcompute.Disk + nodeName types.NodeName + vmList map[string]string + isDataDisksFull bool + isBadDiskURI bool + isDiskUsed bool + setup func(testCloud *provider.Cloud, expectedVMs []compute.VirtualMachine, statusCode int, result *retry.Error) + expectErr bool + isContextDeadlineErr bool + statusCode int + waitResult *retry.Error + expectedLun int32 + contextDuration time.Duration + }{ + { + desc: "correct LUN and no error shall be returned if disk is nil", + vmList: map[string]string{"vm1": "PowerState/Running"}, + nodeName: "vm1", + diskName: "disk-name", + 
existedDisk: nil, + expectedLun: 3, + expectErr: false, + statusCode: 200, + }, + { + desc: "LUN -1 and error shall be returned if there's no such instance corresponding to given nodeName", + nodeName: "vm1", + diskName: "disk-name", + existedDisk: &armcompute.Disk{Name: pointer.String("disk-name")}, + expectedLun: -1, + expectErr: true, + }, + { + desc: "LUN -1 and error shall be returned if there's no available LUN for instance", + vmList: map[string]string{"vm1": "PowerState/Running"}, + nodeName: "vm1", + isDataDisksFull: true, + diskName: "disk-name", + existedDisk: &armcompute.Disk{Name: pointer.String("disk-name")}, + expectedLun: -1, + expectErr: true, + }, + { + desc: "correct LUN and no error shall be returned if everything is good", + vmList: map[string]string{"vm1": "PowerState/Running"}, + nodeName: "vm1", + diskName: "disk-name", + existedDisk: &armcompute.Disk{Name: pointer.String("disk-name"), + Properties: &armcompute.DiskProperties{ + Encryption: &armcompute.Encryption{DiskEncryptionSetID: &diskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, + DiskSizeGB: pointer.Int32(4096), + DiskState: to.Ptr(armcompute.DiskStateUnattached), + }, + Tags: testTags}, + expectedLun: 3, + expectErr: false, + statusCode: 200, + }, + { + desc: "an error shall be returned if disk state is not Unattached", + vmList: map[string]string{"vm1": "PowerState/Running"}, + nodeName: "vm1", + diskName: "disk-name", + existedDisk: &armcompute.Disk{Name: pointer.String("disk-name"), + Properties: &armcompute.DiskProperties{ + Encryption: &armcompute.Encryption{DiskEncryptionSetID: &diskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, + DiskSizeGB: pointer.Int32(4096), + DiskState: to.Ptr(armcompute.DiskStateAttached), + }, + Tags: testTags}, + expectedLun: -1, + expectErr: true, + }, + { + desc: "an error shall be returned if attach an already attached disk with good ManagedBy instance id", + vmList: map[string]string{"vm1": "PowerState/Running"}, + nodeName: "vm1", + diskName: "disk-name", + existedDisk: &armcompute.Disk{Name: pointer.String("disk-name"), ManagedBy: pointer.String(goodInstanceID), Properties: &armcompute.DiskProperties{MaxShares: &maxShare}}, + expectedLun: -1, + expectErr: true, + }, + { + desc: "should return a PartialUpdateError type when storage configuration was accepted but wait fails with an error", + vmList: map[string]string{"vm1": "PowerState/Running"}, + nodeName: "vm1", + diskName: "disk-name", + existedDisk: nil, + expectedLun: -1, + expectErr: true, + statusCode: 200, + waitResult: conflictingUserInputError, + }, + { + desc: "should not return a PartialUpdateError type when storage configuration was not accepted", + vmList: map[string]string{"vm1": "PowerState/Running"}, + nodeName: "vm1", + diskName: "disk-name", + existedDisk: nil, + expectedLun: -1, + expectErr: true, + statusCode: 400, + waitResult: conflictingUserInputError, + }, + } + + for i, test := range testCases { + tt := test + t.Run(tt.desc, func(t *testing.T) { + testCloud := provider.GetTestCloud(ctrl) + diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s", + testCloud.SubscriptionID, testCloud.ResourceGroup, test.diskName) + if tt.isBadDiskURI { + diskURI = fmt.Sprintf("/baduri/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s", + testCloud.SubscriptionID, testCloud.ResourceGroup, tt.diskName) + } + expectedVMs := setTestVirtualMachines(testCloud, tt.vmList, 
tt.isDataDisksFull) + if tt.isDiskUsed { + vm0 := setTestVirtualMachines(testCloud, map[string]string{"vm0": "PowerState/Running"}, tt.isDataDisksFull)[0] + expectedVMs = append(expectedVMs, vm0) + } + if tt.setup == nil { + defaultSetup(testCloud, expectedVMs, tt.statusCode, tt.waitResult) + } else { + tt.setup(testCloud, expectedVMs, tt.statusCode, tt.waitResult) + } + + if tt.contextDuration > 0 { + oldCtx := ctx + ctx, cancel = context.WithTimeout(oldCtx, tt.contextDuration) + defer cancel() + defer func() { + ctx = oldCtx + }() + } + testdiskController := &controllerCommon{ + cloud: testCloud, + lockMap: newLockMap(), + DisableDiskLunCheck: true, + } + lun, err := testdiskController.AttachDisk(ctx, test.diskName, diskURI, tt.nodeName, armcompute.CachingTypesReadOnly, tt.existedDisk, nil) + + assert.Equal(t, tt.expectedLun, lun, "TestCase[%d]: %s", i, tt.desc) + assert.Equal(t, tt.expectErr, err != nil, "TestCase[%d]: %s, return error: %v", i, tt.desc, err) + + assert.Equal(t, tt.isContextDeadlineErr, errors.Is(err, context.DeadlineExceeded)) + }) + } +} + +func TestCommonDetachDisk(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testCases := []struct { + desc string + vmList map[string]string + nodeName types.NodeName + diskName string + expectedErr bool + }{ + { + desc: "error should not be returned if there's no such instance corresponding to given nodeName", + nodeName: "vm1", + expectedErr: false, + }, + { + desc: "no error shall be returned if there's no matching disk according to given diskName", + vmList: map[string]string{"vm1": "PowerState/Running"}, + nodeName: "vm1", + diskName: "diskx", + expectedErr: false, + }, + { + desc: "error shall be returned if the disk exists", + vmList: map[string]string{"vm1": "PowerState/Running"}, + nodeName: "vm1", + diskName: "disk1", + expectedErr: true, + }, + } + + for i, test := range testCases { + testCloud := provider.GetTestCloud(ctrl) + common := &controllerCommon{ + cloud: testCloud, + lockMap: newLockMap(), + } + diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/disk-name", + testCloud.SubscriptionID, testCloud.ResourceGroup) + expectedVMs := setTestVirtualMachines(testCloud, test.vmList, false) + mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface) + for _, vm := range expectedVMs { + mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes() + } + if len(expectedVMs) == 0 { + mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes() + } + mockVMsClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + + err := common.DetachDisk(ctx, test.diskName, diskURI, test.nodeName) + assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, err: %v", i, test.desc, err) + } +} + +func TestCommonUpdateVM(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testCases := []struct { + desc string + vmList map[string]string + nodeName types.NodeName + diskName string + isErrorRetriable bool + expectedErr bool + }{ + { + desc: "error should not be returned if there's 
no such instance corresponding to given nodeName", + nodeName: "vm1", + expectedErr: false, + }, + { + desc: "an error should be returned if vmset detach failed with isErrorRetriable error", + vmList: map[string]string{"vm1": "PowerState/Running"}, + nodeName: "vm1", + isErrorRetriable: true, + expectedErr: true, + }, + { + desc: "no error shall be returned if there's no matching disk according to given diskName", + vmList: map[string]string{"vm1": "PowerState/Running"}, + nodeName: "vm1", + diskName: "diskx", + expectedErr: false, + }, + { + desc: "no error shall be returned if the disk exists", + vmList: map[string]string{"vm1": "PowerState/Running"}, + nodeName: "vm1", + diskName: "disk1", + expectedErr: false, + }, + } + + for i, test := range testCases { + testCloud := provider.GetTestCloud(ctrl) + common := &controllerCommon{ + cloud: testCloud, + lockMap: newLockMap(), + } + expectedVMs := setTestVirtualMachines(testCloud, test.vmList, false) + mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface) + for _, vm := range expectedVMs { + mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes() + } + if len(expectedVMs) == 0 { + mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes() + } + r := autorestmocks.NewResponseWithStatus("200", 200) + r.Body.Close() + r.Request.Method = http.MethodPut + + future, err := azure.NewFutureFromResponse(r) + + mockVMsClient.EXPECT().UpdateAsync(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(&future, err).AnyTimes() + + if test.isErrorRetriable { + testCloud.CloudProviderBackoff = true + testCloud.ResourceRequestBackoff = wait.Backoff{Steps: 1} + mockVMsClient.EXPECT().WaitForUpdateResult(ctx, &future, testCloud.ResourceGroup, gomock.Any()).Return(nil, &retry.Error{HTTPStatusCode: http.StatusBadRequest, Retriable: true, RawError: fmt.Errorf("Retriable: true")}).AnyTimes() + } else { + mockVMsClient.EXPECT().WaitForUpdateResult(ctx, &future, testCloud.ResourceGroup, gomock.Any()).Return(nil, nil).AnyTimes() + } + + err = common.UpdateVM(ctx, test.nodeName) + assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, err: %v", i, test.desc, err) + } +} + +func TestGetDiskLun(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + testCases := []struct { + desc string + diskName string + diskURI string + expectedLun int32 + expectedErr bool + }{ + { + desc: "LUN -1 and error shall be returned if diskName != disk.Name or diskURI != disk.Vhd.URI", + diskName: "diskx", + expectedLun: -1, + expectedErr: true, + }, + { + desc: "correct LUN and no error shall be returned if diskName = disk.Name", + diskName: "disk1", + expectedLun: 0, + expectedErr: false, + }, + } + + for i, test := range testCases { + testCloud := provider.GetTestCloud(ctrl) + common := &controllerCommon{ + cloud: testCloud, + lockMap: newLockMap(), + } + expectedVMs := setTestVirtualMachines(testCloud, map[string]string{"vm1": "PowerState/Running"}, false) + mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface) + for _, vm := range expectedVMs { + mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes() + } + + lun, _, err := common.GetDiskLun(test.diskName, test.diskURI, "vm1") + 
assert.Equal(t, test.expectedLun, lun, "TestCase[%d]: %s", i, test.desc) + assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s", i, test.desc) + } +} + +func TestSetDiskLun(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + testCases := []struct { + desc string + nodeName string + diskURI string + diskMap map[string]*provider.AttachDiskOptions + occupiedLuns []int + isDataDisksFull bool + expectedErr bool + expectedLun int32 + }{ + { + desc: "the minimal LUN shall be returned if there's enough room for extra disks", + nodeName: "nodeName", + diskURI: "diskURI", + occupiedLuns: []int{0, 1, 2}, + diskMap: map[string]*provider.AttachDiskOptions{"diskURI": {}}, + expectedLun: 3, + expectedErr: false, + }, + { + desc: "occupied LUNs shall be skipped", + nodeName: "nodeName", + diskURI: "diskURI", + occupiedLuns: []int{0, 1, 2, 3}, + diskMap: map[string]*provider.AttachDiskOptions{"diskURI": {}}, + expectedLun: 4, + expectedErr: false, + }, + { + desc: "LUN -1 and error shall be returned if there's no available LUN", + nodeName: "nodeName", + diskURI: "diskURI", + diskMap: map[string]*provider.AttachDiskOptions{"diskURI": {}}, + isDataDisksFull: true, + expectedLun: -1, + expectedErr: true, + }, + { + desc: "diskURI1 is not in VM data disk list nor in diskMap", + nodeName: "nodeName", + diskURI: "diskURI1", + diskMap: map[string]*provider.AttachDiskOptions{"diskURI2": {}}, + expectedLun: -1, + expectedErr: true, + }, + } + + for i, test := range testCases { + testCloud := provider.GetTestCloud(ctrl) + common := &controllerCommon{ + cloud: testCloud, + lockMap: newLockMap(), + } + expectedVMs := setTestVirtualMachines(testCloud, map[string]string{test.nodeName: "PowerState/Running"}, test.isDataDisksFull) + mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface) + for _, vm := range expectedVMs { + mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes() + } + + lun, err := common.SetDiskLun(types.NodeName(test.nodeName), test.diskURI, test.diskMap, test.occupiedLuns) + assert.Equal(t, test.expectedLun, lun, "TestCase[%d]: %s", i, test.desc) + assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s", i, test.desc) + } +} + +func TestDisksAreAttached(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + testCases := []struct { + desc string + diskNames []string + nodeName types.NodeName + expectedAttached map[string]bool + expectedErr bool + }{ + { + desc: "an error shall be returned if there's no such instance corresponding to given nodeName", + diskNames: []string{"disk1"}, + nodeName: "vm2", + expectedAttached: map[string]bool{"disk1": false}, + expectedErr: false, + }, + { + desc: "proper attach map shall be returned if everything is good", + diskNames: []string{"disk1", "diskx"}, + nodeName: "vm1", + expectedAttached: map[string]bool{"disk1": true, "diskx": false}, + expectedErr: false, + }, + } + + for i, test := range testCases { + testCloud := provider.GetTestCloud(ctrl) + common := &controllerCommon{ + cloud: testCloud, + lockMap: newLockMap(), + } + expectedVMs := setTestVirtualMachines(testCloud, map[string]string{"vm1": "PowerState/Running"}, false) + mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface) + for _, vm := range expectedVMs { + mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes() + } + mockVMsClient.EXPECT().Get(gomock.Any(), 
testCloud.ResourceGroup, "vm2", gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes() + + attached, err := common.DisksAreAttached(test.diskNames, test.nodeName) + assert.Equal(t, test.expectedAttached, attached, "TestCase[%d]: %s", i, test.desc) + assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s", i, test.desc) + } +} + +func TestGetValidCreationData(t *testing.T) { + sourceResourceSnapshotID := "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx" + sourceResourceVolumeID := "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/disks/xxx" + + tests := []struct { + subscriptionID string + resourceGroup string + sourceResourceID string + sourceType string + expected1 armcompute.CreationData + expected2 error + }{ + { + subscriptionID: "", + resourceGroup: "", + sourceResourceID: "", + sourceType: "", + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), + }, + expected2: nil, + }, + { + subscriptionID: "", + resourceGroup: "", + sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx", + sourceType: sourceSnapshot, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), + SourceResourceID: &sourceResourceSnapshotID, + }, + expected2: nil, + }, + { + subscriptionID: "xxx", + resourceGroup: "xxx", + sourceResourceID: "xxx", + sourceType: sourceSnapshot, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), + SourceResourceID: &sourceResourceSnapshotID, + }, + expected2: nil, + }, + { + subscriptionID: "", + resourceGroup: "", + sourceResourceID: "/subscriptions/23/providers/Microsoft.Compute/disks/name", + sourceType: sourceSnapshot, + expected1: armcompute.CreationData{}, + expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/snapshots//subscriptions/23/providers/Microsoft.Compute/disks/name", diskSnapshotPathRE), + }, + { + subscriptionID: "", + resourceGroup: "", + sourceResourceID: "http://test.com/vhds/name", + sourceType: sourceSnapshot, + expected1: armcompute.CreationData{}, + expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/snapshots/http://test.com/vhds/name", diskSnapshotPathRE), + }, + { + subscriptionID: "", + resourceGroup: "", + sourceResourceID: "/subscriptions/xxx/snapshots/xxx", + sourceType: sourceSnapshot, + expected1: armcompute.CreationData{}, + expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/snapshots//subscriptions/xxx/snapshots/xxx", diskSnapshotPathRE), + }, + { + subscriptionID: "", + resourceGroup: "", + sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx/snapshots/xxx/snapshots/xxx", + sourceType: sourceSnapshot, + expected1: armcompute.CreationData{}, + expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx/snapshots/xxx/snapshots/xxx", diskSnapshotPathRE), + }, + { + subscriptionID: "", + resourceGroup: "", + sourceResourceID: "xxx", + sourceType: "", + expected1: armcompute.CreationData{ + CreateOption: 
to.Ptr(armcompute.DiskCreateOptionEmpty), + }, + expected2: nil, + }, + { + subscriptionID: "", + resourceGroup: "", + sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/disks/xxx", + sourceType: sourceVolume, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), + SourceResourceID: &sourceResourceVolumeID, + }, + expected2: nil, + }, + { + subscriptionID: "xxx", + resourceGroup: "xxx", + sourceResourceID: "xxx", + sourceType: sourceVolume, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), + SourceResourceID: &sourceResourceVolumeID, + }, + expected2: nil, + }, + { + subscriptionID: "", + resourceGroup: "", + sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx", + sourceType: sourceVolume, + expected1: armcompute.CreationData{}, + expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/disks//subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx", managedDiskPathRE), + }, + } + + for _, test := range tests { + options := ManagedDiskOptions{ + SourceResourceID: test.sourceResourceID, + SourceType: test.sourceType, + } + result, err := getValidCreationData(test.subscriptionID, test.resourceGroup, &options) + if !reflect.DeepEqual(result, test.expected1) || !reflect.DeepEqual(err, test.expected2) { + t.Errorf("input sourceResourceID: %v, sourceType: %v, getValidCreationData result: %v, expected1 : %v, err: %v, expected2: %v", test.sourceResourceID, test.sourceType, result, test.expected1, err, test.expected2) + } + } +} + +func TestCheckDiskExists(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testCloud := provider.GetTestCloud(ctrl) + mockFactory := mock_azclient.NewMockClientFactory(ctrl) + common := &controllerCommon{ + cloud: testCloud, + clientFactory: mockFactory, + lockMap: newLockMap(), + } + // create a new disk before running test + newDiskName := "newdisk" + newDiskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s", + testCloud.SubscriptionID, testCloud.ResourceGroup, newDiskName) + + mockDisksClient := mock_diskclient.NewMockInterface(ctrl) + mockFactory.EXPECT().GetDiskClientForSub(gomock.Any()).Return(mockDisksClient, nil).AnyTimes() + mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, newDiskName).Return(&armcompute.Disk{}, nil).AnyTimes() + mockDisksClient.EXPECT().Get(gomock.Any(), gomock.Not(testCloud.ResourceGroup), gomock.Any()).Return(&armcompute.Disk{}, &azcore.ResponseError{ + StatusCode: http.StatusNotFound, + RawResponse: &http.Response{ + StatusCode: http.StatusNotFound, + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + }, + }).AnyTimes() + + testCases := []struct { + diskURI string + expectedResult bool + expectedErr bool + }{ + { + diskURI: "incorrect disk URI format", + expectedResult: false, + expectedErr: true, + }, + { + diskURI: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/disks/non-existing-disk", + expectedResult: false, + expectedErr: false, + }, + { + diskURI: newDiskURI, + expectedResult: true, + expectedErr: false, + }, + } + + for i, test := range testCases { + exist, err := common.checkDiskExists(ctx, test.diskURI) + assert.Equal(t, test.expectedResult, exist, "TestCase[%d]", i, exist) + 
assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d], return error: %v", i, err) + } +} + +func TestFilterNonExistingDisksWithSpecialHTTPStatusCode(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testCloud := provider.GetTestCloud(ctrl) + mockFactory := mock_azclient.NewMockClientFactory(ctrl) + common := &controllerCommon{ + cloud: testCloud, + clientFactory: mockFactory, + lockMap: newLockMap(), + } + // create a new disk before running test + diskURIPrefix := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/", + testCloud.SubscriptionID, testCloud.ResourceGroup) + newDiskName := "specialdisk" + newDiskURI := diskURIPrefix + newDiskName + + mockDisksClient := mock_diskclient.NewMockInterface(ctrl) + mockFactory.EXPECT().GetDiskClientForSub(gomock.Any()).Return(mockDisksClient, nil).AnyTimes() + mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Eq(newDiskName)).Return(&armcompute.Disk{}, &azcore.ResponseError{ + StatusCode: http.StatusBadRequest, + RawResponse: &http.Response{ + StatusCode: http.StatusBadRequest, + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + }, + }).AnyTimes() + + disks := []*armcompute.DataDisk{ + { + Name: &newDiskName, + ManagedDisk: &armcompute.ManagedDiskParameters{ + ID: &newDiskURI, + }, + }, + } + + filteredDisks := common.filterNonExistingDisks(ctx, disks) + assert.Equal(t, 1, len(filteredDisks)) + assert.Equal(t, newDiskName, *filteredDisks[0].Name) +} + +func TestIsInstanceNotFoundError(t *testing.T) { + testCases := []struct { + errMsg string + expectedResult bool + }{ + { + errMsg: "", + expectedResult: false, + }, + { + errMsg: "other error", + expectedResult: false, + }, + { + errMsg: "The provided instanceId 857 is not an active Virtual Machine Scale Set VM instanceId.", + expectedResult: true, + }, + { + errMsg: `compute.VirtualMachineScaleSetVMsClient#Update: Failure sending request: StatusCode=400 -- Original Error: Code="InvalidParameter" Message="The provided instanceId 1181 is not an active Virtual Machine Scale Set VM instanceId." 
Target="instanceIds"`, + expectedResult: true, + }, + } + + for i, test := range testCases { + result := isInstanceNotFoundError(fmt.Errorf(test.errMsg)) + assert.Equal(t, test.expectedResult, result, "TestCase[%d]", i, result) + } +} + +func TestAttachDiskRequestFuncs(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + testCases := []struct { + desc string + diskURI string + nodeName string + diskName string + diskNum int + duplicateDiskRequest bool + expectedErr bool + }{ + { + desc: "one disk request in queue", + diskURI: "diskURI", + nodeName: "nodeName", + diskName: "diskName", + diskNum: 1, + expectedErr: false, + }, + { + desc: "multiple disk requests in queue", + diskURI: "diskURI", + nodeName: "nodeName", + diskName: "diskName", + diskNum: 10, + expectedErr: false, + }, + { + desc: "zero disk request in queue", + diskURI: "diskURI", + nodeName: "nodeName", + diskName: "diskName", + diskNum: 0, + expectedErr: false, + }, + { + desc: "multiple disk requests in queue", + diskURI: "diskURI", + nodeName: "nodeName", + diskName: "diskName", + duplicateDiskRequest: true, + diskNum: 10, + expectedErr: false, + }, + } + + for i, test := range testCases { + testCloud := provider.GetTestCloud(ctrl) + common := &controllerCommon{ + cloud: testCloud, + lockMap: newLockMap(), + } + for i := 1; i <= test.diskNum; i++ { + diskURI := fmt.Sprintf("%s%d", test.diskURI, i) + diskName := fmt.Sprintf("%s%d", test.diskName, i) + ops := &provider.AttachDiskOptions{DiskName: diskName} + _, err := common.insertAttachDiskRequest(diskURI, test.nodeName, ops) + assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s", i, test.desc) + if test.duplicateDiskRequest { + _, err := common.insertAttachDiskRequest(diskURI, test.nodeName, ops) + assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s", i, test.desc) + } + } + + diskMap, err := common.cleanAttachDiskRequests(test.nodeName) + assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s", i, test.desc) + assert.Equal(t, test.diskNum, len(diskMap), "TestCase[%d]: %s", i, test.desc) + for diskURI, opt := range diskMap { + assert.Equal(t, strings.Contains(diskURI, test.diskURI), true, "TestCase[%d]: %s", i, test.desc) + assert.Equal(t, strings.Contains(opt.DiskName, test.diskName), true, "TestCase[%d]: %s", i, test.desc) + } + } +} + +func TestDetachDiskRequestFuncs(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + testCases := []struct { + desc string + diskURI string + nodeName string + diskName string + diskNum int + duplicateDiskRequest bool + expectedErr bool + }{ + { + desc: "one disk request in queue", + diskURI: "diskURI", + nodeName: "nodeName", + diskName: "diskName", + diskNum: 1, + expectedErr: false, + }, + { + desc: "multiple disk requests in queue", + diskURI: "diskURI", + nodeName: "nodeName", + diskName: "diskName", + diskNum: 10, + expectedErr: false, + }, + { + desc: "zero disk request in queue", + diskURI: "diskURI", + nodeName: "nodeName", + diskName: "diskName", + diskNum: 0, + expectedErr: false, + }, + { + desc: "multiple disk requests in queue", + diskURI: "diskURI", + nodeName: "nodeName", + diskName: "diskName", + duplicateDiskRequest: true, + diskNum: 10, + expectedErr: false, + }, + } + + for i, test := range testCases { + testCloud := provider.GetTestCloud(ctrl) + common := &controllerCommon{ + cloud: testCloud, + lockMap: newLockMap(), + } + for i := 1; i <= test.diskNum; i++ { + diskURI := fmt.Sprintf("%s%d", test.diskURI, i) + diskName := 
fmt.Sprintf("%s%d", test.diskName, i) + _, err := common.insertDetachDiskRequest(diskName, diskURI, test.nodeName) + assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s", i, test.desc) + if test.duplicateDiskRequest { + _, err := common.insertDetachDiskRequest(diskName, diskURI, test.nodeName) + assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s", i, test.desc) + } + } + + diskMap, err := common.cleanDetachDiskRequests(test.nodeName) + assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s", i, test.desc) + assert.Equal(t, test.diskNum, len(diskMap), "TestCase[%d]: %s", i, test.desc) + for diskURI, diskName := range diskMap { + assert.Equal(t, strings.Contains(diskURI, test.diskURI), true, "TestCase[%d]: %s", i, test.desc) + assert.Equal(t, strings.Contains(diskName, test.diskName), true, "TestCase[%d]: %s", i, test.desc) + } + } +} + +// setTestVirtualMachines sets test virtual machine with powerstate. +func setTestVirtualMachines(c *provider.Cloud, vmList map[string]string, isDataDisksFull bool) []compute.VirtualMachine { + expectedVMs := make([]compute.VirtualMachine, 0) + + for nodeName, powerState := range vmList { + nodeName := nodeName + instanceID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/%s", nodeName) + vm := compute.VirtualMachine{ + Name: &nodeName, + ID: &instanceID, + Location: &c.Location, + } + status := []compute.InstanceViewStatus{ + { + Code: pointer.String(powerState), + }, + { + Code: pointer.String("ProvisioningState/succeeded"), + }, + } + vm.VirtualMachineProperties = &compute.VirtualMachineProperties{ + ProvisioningState: pointer.String(string(consts.ProvisioningStateSucceeded)), + HardwareProfile: &compute.HardwareProfile{ + VMSize: compute.StandardA0, + }, + InstanceView: &compute.VirtualMachineInstanceView{ + Statuses: &status, + }, + StorageProfile: &compute.StorageProfile{ + DataDisks: &[]compute.DataDisk{}, + }, + } + if !isDataDisksFull { + vm.StorageProfile.DataDisks = &[]compute.DataDisk{ + { + Lun: pointer.Int32(0), + Name: pointer.String("disk1"), + }, + { + Lun: pointer.Int32(1), + Name: pointer.String("disk2"), + }, + { + Lun: pointer.Int32(2), + Name: pointer.String("disk3"), + }, + } + } else { + dataDisks := make([]compute.DataDisk, maxLUN) + for i := 0; i < maxLUN; i++ { + dataDisks[i] = compute.DataDisk{Lun: pointer.Int32(int32(i))} + } + vm.StorageProfile.DataDisks = &dataDisks + } + + expectedVMs = append(expectedVMs, vm) + } + + return expectedVMs +} diff --git a/pkg/azuredisk/azure_dd_max_disk_count.go b/pkg/azuredisk/azure_dd_max_disk_count.go index 10a074f46..c6b47b365 100644 --- a/pkg/azuredisk/azure_dd_max_disk_count.go +++ b/pkg/azuredisk/azure_dd_max_disk_count.go @@ -304,6 +304,8 @@ var maxDataDiskCountMap = map[string]int64{ "STANDARD_DC16AS_V4": 32, "STANDARD_DC16AS_V5": 32, "STANDARD_DC16DS_V3": 32, + "STANDARD_DC16EDS_V5": 32, + "STANDARD_DC16ES_V5": 32, "STANDARD_DC16S_V3": 32, "STANDARD_DC1AS_V4": 4, "STANDARD_DC1DS_V3": 4, @@ -316,6 +318,8 @@ var maxDataDiskCountMap = map[string]int64{ "STANDARD_DC24DS_V3": 32, "STANDARD_DC24S_V3": 32, "STANDARD_DC2DS_V3": 8, + "STANDARD_DC2EDS_V5": 4, + "STANDARD_DC2ES_V5": 4, "STANDARD_DC2ADS_V5": 4, "STANDARD_DC2AS_V4": 4, "STANDARD_DC2AS_V5": 4, @@ -332,6 +336,8 @@ var maxDataDiskCountMap = map[string]int64{ "STANDARD_DC32AS_V4": 32, "STANDARD_DC32AS_V5": 32, "STANDARD_DC32DS_V3": 32, + "STANDARD_DC48EDS_V5": 32, + "STANDARD_DC48ES_V5": 32, "STANDARD_DC32S_V3": 32, "STANDARD_DC48ADS_CC_V5": 32, 
"STANDARD_DC48ADS_V5": 32, @@ -345,6 +351,8 @@ var maxDataDiskCountMap = map[string]int64{ "STANDARD_DC4AS_CC_V5": 8, "STANDARD_DC4AS_V5": 8, "STANDARD_DC4DS_V3": 16, + "STANDARD_DC4EDS_V5": 8, + "STANDARD_DC4ES_V5": 8, "STANDARD_DC4S": 4, "STANDARD_DC4S_V2": 4, "STANDARD_DC8DMS_V3": 32, @@ -355,12 +363,16 @@ var maxDataDiskCountMap = map[string]int64{ "STANDARD_DC64ADS_V5": 32, "STANDARD_DC64AS_CC_V5": 32, "STANDARD_DC64AS_V5": 32, + "STANDARD_DC64EDS_V5": 32, + "STANDARD_DC64ES_V5": 32, "STANDARD_DC8ADS_CC_V5": 16, "STANDARD_DC8ADS_V5": 16, "STANDARD_DC8AS_CC_V5": 16, "STANDARD_DC8AS_V4": 32, "STANDARD_DC8AS_V5": 16, "STANDARD_DC8DS_V3": 32, + "STANDARD_DC8EDS_V5": 16, + "STANDARD_DC8ES_V5": 16, "STANDARD_DC8S_V3": 32, "STANDARD_DC8_V2": 8, "STANDARD_DC96ADS_CC_V5": 32, @@ -368,6 +380,8 @@ var maxDataDiskCountMap = map[string]int64{ "STANDARD_DC96AS_CC_V5": 32, "STANDARD_DC96AS_V4": 32, "STANDARD_DC96AS_V5": 32, + "STANDARD_DC96EDS_V5": 32, + "STANDARD_DC96ES_V5": 32, "STANDARD_DS11-1_V2": 8, "STANDARD_DS11": 8, "STANDARD_DS11_V2": 8, @@ -658,36 +672,54 @@ var maxDataDiskCountMap = map[string]int64{ "STANDARD_E96PS_V5": 32, "STANDARD_E96S_V5": 32, "STANDARD_E96_V5": 32, + "STANDARD_EC128EDS_V5": 32, + "STANDARD_EC128ES_V5": 32, + "STANDARD_EC128IEDS_V5": 32, + "STANDARD_EC128IES_V5": 32, "STANDARD_EC16ADS_CC_V5": 32, "STANDARD_EC16ADS_V5": 32, "STANDARD_EC16AS_CC_V5": 32, "STANDARD_EC16AS_V5": 32, + "STANDARD_EC16EDS_V5": 32, + "STANDARD_EC16ES_V5": 32, "STANDARD_EC20ADS_CC_V5": 32, "STANDARD_EC20ADS_V5": 32, "STANDARD_EC20AS_CC_V5": 32, "STANDARD_EC20AS_V5": 32, "STANDARD_EC2ADS_V5": 4, "STANDARD_EC2AS_V5": 4, + "STANDARD_EC2EDS_V5": 4, + "STANDARD_EC2ES_V5": 4, "STANDARD_EC32ADS_CC_V5": 32, "STANDARD_EC32ADS_V5": 32, "STANDARD_EC32AS_CC_V5": 32, "STANDARD_EC32AS_V5": 32, + "STANDARD_EC32EDS_V5": 32, + "STANDARD_EC32ES_V5": 32, "STANDARD_EC48ADS_CC_V5": 32, "STANDARD_EC48ADS_V5": 32, "STANDARD_EC48AS_CC_V5": 32, "STANDARD_EC48AS_V5": 32, + "STANDARD_EC48EDS_V5": 32, + "STANDARD_EC48ES_V5": 32, "STANDARD_EC4ADS_CC_V5": 8, "STANDARD_EC4ADS_V5": 8, "STANDARD_EC4AS_CC_V5": 8, "STANDARD_EC4AS_V5": 8, + "STANDARD_EC4EDS_V5": 8, + "STANDARD_EC4ES_V5": 8, "STANDARD_EC64ADS_CC_V5": 32, "STANDARD_EC64ADS_V5": 32, "STANDARD_EC64AS_CC_V5": 32, "STANDARD_EC64AS_V5": 32, + "STANDARD_EC64EDS_V5": 32, + "STANDARD_EC64ES_V5": 32, "STANDARD_EC8ADS_CC_V5": 16, "STANDARD_EC8ADS_V5": 16, "STANDARD_EC8AS_CC_V5": 16, "STANDARD_EC8AS_V5": 16, + "STANDARD_EC8EDS_V5": 16, + "STANDARD_EC8ES_V5": 16, "STANDARD_EC96ADS_CC_V5": 32, "STANDARD_EC96ADS_V5": 32, "STANDARD_EC96AS_CC_V5": 32, @@ -802,15 +834,23 @@ var maxDataDiskCountMap = map[string]int64{ "STANDARD_M128MS_V2": 64, "STANDARD_M128S": 64, "STANDARD_M128S_V2": 64, + "STANDARD_M12DS_V3": 64, + "STANDARD_M12S_V3": 64, "STANDARD_M16-4MS": 16, "STANDARD_M16-8MS": 16, "STANDARD_M16MS": 16, + "STANDARD_M176DS_3_V3": 64, + "STANDARD_M176DS_4_V3": 64, + "STANDARD_M176S_3_V3": 64, + "STANDARD_M176S_4_V3": 64, "STANDARD_M192IDMS_V2": 64, "STANDARD_M192IDS_V2": 64, "STANDARD_M192IMS_V2": 64, "STANDARD_M192IS_V2": 64, "STANDARD_M208MS_V2": 64, "STANDARD_M208S_V2": 64, + "STANDARD_M24DS_V3": 64, + "STANDARD_M24S_V3": 64, "STANDARD_M32-16MS": 32, "STANDARD_M32-8MS": 32, "STANDARD_M32DMS_V2": 32, @@ -823,6 +863,8 @@ var maxDataDiskCountMap = map[string]int64{ "STANDARD_M416MS_V2": 64, "STANDARD_M416S_8_V2": 64, "STANDARD_M416S_V2": 64, + "STANDARD_M48DS_1_V3": 64, + "STANDARD_M48S_1_V3": 64, "STANDARD_M64-16MS": 64, "STANDARD_M64-32MS": 64, "STANDARD_M64": 64, @@ -839,6 
+881,10 @@ var maxDataDiskCountMap = map[string]int64{ "STANDARD_M8-4MS": 8, "STANDARD_M864IXS": 64, "STANDARD_M8MS": 8, + "STANDARD_M96DS_1_V3": 64, + "STANDARD_M96DS_2_V3": 64, + "STANDARD_M96S_1_V3": 64, + "STANDARD_M96S_2_V3": 64, "STANDARD_NC12": 48, "STANDARD_NC12_PROMO": 48, "STANDARD_NC12S_V2": 24, @@ -864,6 +910,8 @@ var maxDataDiskCountMap = map[string]int64{ "STANDARD_NC8ADS_A10_V4": 8, "STANDARD_NC8AS_T4_V3": 16, "STANDARD_NC96ADS_A100_V4": 32, + "STANDARD_ND100IS_H100_V5": 16, + "STANDARD_ND100ISR_H100_V5": 16, "STANDARD_ND112AMSR_A100_V4": 16, "STANDARD_ND120AMSR_A100_V4": 16, "STANDARD_NCC24ADS_A100_V4": 8, @@ -872,13 +920,21 @@ var maxDataDiskCountMap = map[string]int64{ "STANDARD_ND24S": 32, "STANDARD_ND40RS_V2": 8, "STANDARD_ND40S_V3": 32, + "STANDARD_ND46S_H100_V5": 8, "STANDARD_ND48S_H100_V5": 8, + "STANDARD_ND50S_H100_V5": 8, "STANDARD_ND6S": 12, + "STANDARD_ND92IS_H100_V5": 16, + "STANDARD_ND92ISR_H100_V5": 16, "STANDARD_ND96AMS_A100_V4": 16, "STANDARD_ND96AMSR_A100_V4": 16, "STANDARD_ND96ASR_V4": 16, "STANDARD_ND96IS_H100_V5": 16, "STANDARD_ND96ISR_H100_V5": 16, + "STANDARD_NG16ADS_V620_V1": 16, + "STANDARD_NG32ADMS_V620_V1": 32, + "STANDARD_NG32ADS_V620_V1": 32, + "STANDARD_NG8ADS_V620_V1": 8, "STANDARD_NP10S": 8, "STANDARD_NP20S": 16, "STANDARD_NP40S": 32, diff --git a/pkg/azuredisk/azure_managedDiskController.go b/pkg/azuredisk/azure_managedDiskController.go new file mode 100644 index 000000000..11542050a --- /dev/null +++ b/pkg/azuredisk/azure_managedDiskController.go @@ -0,0 +1,426 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azuredisk + +import ( + "context" + "errors" + "fmt" + "net/http" + "path" + "strconv" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + + "k8s.io/apimachinery/pkg/api/resource" + kwait "k8s.io/apimachinery/pkg/util/wait" + volumehelpers "k8s.io/cloud-provider/volume/helpers" + "k8s.io/klog/v2" + "k8s.io/utils/pointer" + + "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/provider" +) + +// ManagedDiskController : managed disk controller struct +type ManagedDiskController struct { + *controllerCommon +} + +func NewManagedDiskController(provider *provider.Cloud) *ManagedDiskController { + common := &controllerCommon{ + cloud: provider, + lockMap: newLockMap(), + AttachDetachInitialDelayInMs: defaultAttachDetachInitialDelayInMs, + clientFactory: provider.ComputeClientFactory, + } + + return &ManagedDiskController{common} +} + +// ManagedDiskOptions specifies the options of managed disks. +type ManagedDiskOptions struct { + // The SKU of storage account. + StorageAccountType armcompute.DiskStorageAccountTypes + // The name of the disk. + DiskName string + // The name of PVC. + PVCName string + // The name of resource group. + ResourceGroup string + // The AvailabilityZone to create the disk. 
+ AvailabilityZone string + // The tags of the disk. + Tags map[string]string + // IOPS Caps for UltraSSD disk + DiskIOPSReadWrite string + // Throughput Cap (MBps) for UltraSSD disk + DiskMBpsReadWrite string + // if SourceResourceID is not empty, then it's a disk copy operation(for snapshot) + SourceResourceID string + // The type of source + SourceType string + // ResourceId of the disk encryption set to use for enabling encryption at rest. + DiskEncryptionSetID string + // DiskEncryption type, available values: EncryptionAtRestWithCustomerKey, EncryptionAtRestWithPlatformAndCustomerKeys + DiskEncryptionType string + // The size in GB. + SizeGB int + // The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time. + MaxShares int32 + // Logical sector size in bytes for Ultra disks + LogicalSectorSize int32 + // SkipGetDiskOperation indicates whether skip GetDisk operation(mainly due to throttling) + SkipGetDiskOperation bool + // PublicNetworkAccess - Possible values include: 'Enabled', 'Disabled' + PublicNetworkAccess armcompute.PublicNetworkAccess + // NetworkAccessPolicy - Possible values include: 'AllowAll', 'AllowPrivate', 'DenyAll' + NetworkAccessPolicy armcompute.NetworkAccessPolicy + // DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks. + DiskAccessID *string + // BurstingEnabled - Set to true to enable bursting beyond the provisioned performance target of the disk. + BurstingEnabled *bool + // SubscriptionID - specify a different SubscriptionID + SubscriptionID string + // Location - specify a different location + Location string + // PerformancePlus - Set this flag to true to get a boost on the performance target of the disk deployed + PerformancePlus *bool +} + +// CreateManagedDisk: create managed disk +func (c *ManagedDiskController) CreateManagedDisk(ctx context.Context, options *ManagedDiskOptions) (string, error) { + var err error + klog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB) + + var createZones []string + if len(options.AvailabilityZone) > 0 { + requestedZone := c.cloud.GetZoneID(options.AvailabilityZone) + if requestedZone != "" { + createZones = append(createZones, requestedZone) + } + } + + // insert original tags to newTags + newTags := make(map[string]*string) + azureDDTag := "kubernetes-azure-dd" + newTags[consts.CreatedByTag] = &azureDDTag + if options.Tags != nil { + for k, v := range options.Tags { + value := v + newTags[k] = &value + } + } + + diskSizeGB := int32(options.SizeGB) + diskSku := options.StorageAccountType + + rg := c.cloud.ResourceGroup + if options.ResourceGroup != "" { + rg = options.ResourceGroup + } + if options.SubscriptionID != "" && !strings.EqualFold(options.SubscriptionID, c.cloud.SubscriptionID) && options.ResourceGroup == "" { + return "", fmt.Errorf("resourceGroup must be specified when subscriptionID(%s) is not empty", options.SubscriptionID) + } + subsID := c.cloud.SubscriptionID + if options.SubscriptionID != "" { + subsID = options.SubscriptionID + } + + creationData, err := getValidCreationData(subsID, rg, options) + if err != nil { + return "", err + } + diskProperties := armcompute.DiskProperties{ + DiskSizeGB: &diskSizeGB, + CreationData: &creationData, + BurstingEnabled: options.BurstingEnabled, + } + + if options.PublicNetworkAccess != "" { + diskProperties.PublicNetworkAccess = 
to.Ptr(options.PublicNetworkAccess) + } + + if options.NetworkAccessPolicy != "" { + diskProperties.NetworkAccessPolicy = to.Ptr(options.NetworkAccessPolicy) + if options.NetworkAccessPolicy == armcompute.NetworkAccessPolicyAllowPrivate { + if options.DiskAccessID == nil { + return "", fmt.Errorf("DiskAccessID should not be empty when NetworkAccessPolicy is AllowPrivate") + } + diskProperties.DiskAccessID = options.DiskAccessID + } else { + if options.DiskAccessID != nil { + return "", fmt.Errorf("DiskAccessID(%s) must be empty when NetworkAccessPolicy(%s) is not AllowPrivate", *options.DiskAccessID, options.NetworkAccessPolicy) + } + } + } + + if diskSku == armcompute.DiskStorageAccountTypesUltraSSDLRS || diskSku == armcompute.DiskStorageAccountTypesPremiumV2LRS { + if options.DiskIOPSReadWrite == "" { + if diskSku == armcompute.DiskStorageAccountTypesUltraSSDLRS { + diskIOPSReadWrite := int64(consts.DefaultDiskIOPSReadWrite) + diskProperties.DiskIOPSReadWrite = pointer.Int64(diskIOPSReadWrite) + } + } else { + v, err := strconv.Atoi(options.DiskIOPSReadWrite) + if err != nil { + return "", fmt.Errorf("AzureDisk - failed to parse DiskIOPSReadWrite: %w", err) + } + diskIOPSReadWrite := int64(v) + diskProperties.DiskIOPSReadWrite = pointer.Int64(diskIOPSReadWrite) + } + + if options.DiskMBpsReadWrite == "" { + if diskSku == armcompute.DiskStorageAccountTypesUltraSSDLRS { + diskMBpsReadWrite := int64(consts.DefaultDiskMBpsReadWrite) + diskProperties.DiskMBpsReadWrite = pointer.Int64(diskMBpsReadWrite) + } + } else { + v, err := strconv.Atoi(options.DiskMBpsReadWrite) + if err != nil { + return "", fmt.Errorf("AzureDisk - failed to parse DiskMBpsReadWrite: %w", err) + } + diskMBpsReadWrite := int64(v) + diskProperties.DiskMBpsReadWrite = pointer.Int64(diskMBpsReadWrite) + } + + if options.LogicalSectorSize != 0 { + klog.V(2).Infof("AzureDisk - requested LogicalSectorSize: %v", options.LogicalSectorSize) + diskProperties.CreationData.LogicalSectorSize = pointer.Int32(options.LogicalSectorSize) + } + } else { + if options.DiskIOPSReadWrite != "" { + return "", fmt.Errorf("AzureDisk - DiskIOPSReadWrite parameter is only applicable in UltraSSD_LRS disk type") + } + if options.DiskMBpsReadWrite != "" { + return "", fmt.Errorf("AzureDisk - DiskMBpsReadWrite parameter is only applicable in UltraSSD_LRS disk type") + } + if options.LogicalSectorSize != 0 { + return "", fmt.Errorf("AzureDisk - LogicalSectorSize parameter is only applicable in UltraSSD_LRS disk type") + } + } + + if options.DiskEncryptionSetID != "" { + if strings.Index(strings.ToLower(options.DiskEncryptionSetID), "/subscriptions/") != 0 { + return "", fmt.Errorf("AzureDisk - format of DiskEncryptionSetID(%s) is incorrect, correct format: %s", options.DiskEncryptionSetID, consts.DiskEncryptionSetIDFormat) + } + encryptionType := armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey + if options.DiskEncryptionType != "" { + encryptionType = armcompute.EncryptionType(options.DiskEncryptionType) + klog.V(4).Infof("azureDisk - DiskEncryptionType: %s, DiskEncryptionSetID: %s", options.DiskEncryptionType, options.DiskEncryptionSetID) + } + diskProperties.Encryption = &armcompute.Encryption{ + DiskEncryptionSetID: &options.DiskEncryptionSetID, + Type: to.Ptr(encryptionType), + } + } else { + if options.DiskEncryptionType != "" { + return "", fmt.Errorf("AzureDisk - DiskEncryptionType(%s) should be empty when DiskEncryptionSetID is not set", options.DiskEncryptionType) + } + } + + if options.MaxShares > 1 { + diskProperties.MaxShares = 
&options.MaxShares + } + + location := c.cloud.Location + if options.Location != "" { + location = options.Location + } + model := armcompute.Disk{ + Location: &location, + Tags: newTags, + SKU: &armcompute.DiskSKU{ + Name: to.Ptr(diskSku), + }, + Properties: &diskProperties, + } + + if c.cloud.HasExtendedLocation() { + model.ExtendedLocation = &armcompute.ExtendedLocation{ + Name: pointer.String(c.cloud.ExtendedLocationName), + Type: to.Ptr(armcompute.ExtendedLocationTypes(c.cloud.ExtendedLocationType)), + } + } + + if len(createZones) > 0 { + model.Zones = to.SliceOfPtrs(createZones...) + } + diskClient, err := c.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return "", err + } + if _, err := diskClient.CreateOrUpdate(ctx, rg, options.DiskName, model); err != nil { + return "", err + } + + diskID := fmt.Sprintf(managedDiskPath, subsID, rg, options.DiskName) + + if options.SkipGetDiskOperation { + klog.Warningf("azureDisk - GetDisk(%s, StorageAccountType:%s) is throttled, unable to confirm provisioningState in poll process", options.DiskName, options.StorageAccountType) + } else { + err = kwait.ExponentialBackoffWithContext(ctx, defaultBackOff, func(ctx context.Context) (bool, error) { + provisionState, id, err := c.GetDisk(ctx, subsID, rg, options.DiskName) + if err == nil { + if id != "" { + diskID = id + } + } else { + // We are waiting for provisioningState==Succeeded + // We don't want to hand-off managed disks to k8s while they are + //still being provisioned, this is to avoid some race conditions + return false, err + } + if strings.ToLower(provisionState) == "succeeded" { + return true, nil + } + return false, nil + }) + + if err != nil { + klog.Warningf("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", options.DiskName, options.StorageAccountType, options.SizeGB) + } + } + + klog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB) + return diskID, nil +} + +// DeleteManagedDisk : delete managed disk +func (c *ManagedDiskController) DeleteManagedDisk(ctx context.Context, diskURI string) error { + resourceGroup, subsID, err := getInfoFromDiskURI(diskURI) + if err != nil { + return err + } + + if state, ok := c.diskStateMap.Load(strings.ToLower(diskURI)); ok { + return fmt.Errorf("failed to delete disk(%s) since it's in %s state", diskURI, state.(string)) + } + + diskName := path.Base(diskURI) + diskClient, err := c.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return err + } + + disk, err := diskClient.Get(ctx, resourceGroup, diskName) + if err != nil { + var respErr = &azcore.ResponseError{} + if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound { + klog.V(2).Infof("azureDisk - disk(%s) is already deleted", diskURI) + return nil + } + return err + } + if disk.ManagedBy != nil { + return fmt.Errorf("disk(%s) already attached to node(%s), could not be deleted", diskURI, *disk.ManagedBy) + } + + if err = diskClient.Delete(ctx, resourceGroup, diskName); err != nil { + return err + } + // We don't need poll here, k8s will immediately stop referencing the disk + // the disk will be eventually deleted - cleanly - by ARM + + klog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI) + + return nil +} + +// GetDisk return: disk provisionState, diskID, error +func (c *ManagedDiskController) GetDisk(ctx context.Context, subsID, resourceGroup, diskName string) (string, 
string, error) { + diskclient, err := c.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return "", "", err + } + + result, err := diskclient.Get(ctx, resourceGroup, diskName) + if err != nil { + return "", "", err + } + + if result.Properties != nil && (*result.Properties).ProvisioningState != nil { + return *(*result.Properties).ProvisioningState, *result.ID, nil + } + return "", "", nil +} + +// ResizeDisk Expand the disk to new size +func (c *ManagedDiskController) ResizeDisk(ctx context.Context, diskURI string, oldSize resource.Quantity, newSize resource.Quantity, supportOnlineResize bool) (resource.Quantity, error) { + diskName := path.Base(diskURI) + resourceGroup, subsID, err := getInfoFromDiskURI(diskURI) + if err != nil { + return oldSize, err + } + diskClient, err := c.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return oldSize, err + } + result, err := diskClient.Get(ctx, resourceGroup, diskName) + if err != nil { + return oldSize, err + } + + if result.Properties == nil || result.Properties.DiskSizeGB == nil { + return oldSize, fmt.Errorf("DiskProperties of disk(%s) is nil", diskName) + } + + // Azure resizes in chunks of GiB (not GB) + requestGiB, err := volumehelpers.RoundUpToGiBInt32(newSize) + if err != nil { + return oldSize, err + } + + newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB)) + + klog.V(2).Infof("azureDisk - begin to resize disk(%s) with new size(%d), old size(%v)", diskName, requestGiB, oldSize) + // If disk already of greater or equal size than requested we return + if *result.Properties.DiskSizeGB >= requestGiB { + return newSizeQuant, nil + } + + if !supportOnlineResize && *result.Properties.DiskState != armcompute.DiskStateUnattached { + return oldSize, fmt.Errorf("azureDisk - disk resize is only supported on Unattached disk, current disk state: %s, already attached to %s", *result.Properties.DiskState, pointer.StringDeref(result.ManagedBy, "")) + } + + diskParameter := armcompute.DiskUpdate{ + Properties: &armcompute.DiskUpdateProperties{ + DiskSizeGB: &requestGiB, + }, + } + + if _, err := diskClient.Patch(ctx, resourceGroup, diskName, diskParameter); err != nil { + return oldSize, err + } + + klog.V(2).Infof("azureDisk - resize disk(%s) with new size(%d) completed", diskName, requestGiB) + return newSizeQuant, nil +} + +// get resource group name, subs id from a managed disk URI, e.g. return {group-name}, {sub-id} according to +// /subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id} +// according to https://docs.microsoft.com/en-us/rest/api/compute/disks/get +func getInfoFromDiskURI(diskURI string) (string, string, error) { + fields := strings.Split(diskURI, "/") + if len(fields) != 9 || strings.ToLower(fields[3]) != "resourcegroups" { + return "", "", fmt.Errorf("invalid disk URI: %s", diskURI) + } + return fields[4], fields[2], nil +} diff --git a/pkg/azuredisk/azure_managedDiskController_test.go b/pkg/azuredisk/azure_managedDiskController_test.go new file mode 100644 index 000000000..6d4e72278 --- /dev/null +++ b/pkg/azuredisk/azure_managedDiskController_test.go @@ -0,0 +1,611 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azuredisk + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" + + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/utils/pointer" + + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/mock_diskclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient" + "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/provider" +) + +const ( + fakeGetDiskFailed = "fakeGetDiskFailed" + disk1Name = "disk1" + disk1ID = "diskid1" +) + +func TestCreateManagedDisk(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + maxShare := int32(2) + goodDiskEncryptionSetID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/diskEncryptionSets/%s", "diskEncryptionSet-name") + badDiskEncryptionSetID := "badDiskEncryptionSetID" + testTags := make(map[string]*string) + testTags[WriteAcceleratorEnabled] = pointer.String("true") + testCases := []struct { + desc string + diskID string + diskName string + storageAccountType armcompute.DiskStorageAccountTypes + diskIOPSReadWrite string + diskMBPSReadWrite string + diskEncryptionSetID string + diskEncryptionType string + subscriptionID string + resouceGroup string + publicNetworkAccess armcompute.PublicNetworkAccess + networkAccessPolicy armcompute.NetworkAccessPolicy + diskAccessID *string + expectedDiskID string + existedDisk *armcompute.Disk + expectedErr bool + expectedErrMsg error + }{ + { + desc: "disk Id and no error shall be returned if everything is good with DiskStorageAccountTypesUltraSSDLRS storage account", + diskID: disk1ID, + diskName: disk1Name, + storageAccountType: armcompute.DiskStorageAccountTypesUltraSSDLRS, + diskIOPSReadWrite: "100", + diskMBPSReadWrite: "100", + diskEncryptionSetID: goodDiskEncryptionSetID, + expectedDiskID: disk1ID, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + expectedErr: false, + }, + { + desc: "disk Id and no error shall be returned if everything is good with PremiumV2LRS storage account", + diskID: disk1ID, + diskName: disk1Name, + storageAccountType: armcompute.DiskStorageAccountTypesPremiumV2LRS, + diskIOPSReadWrite: "100", + diskMBPSReadWrite: "100", + diskEncryptionSetID: goodDiskEncryptionSetID, + expectedDiskID: disk1ID, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: 
pointer.String("Succeeded")}, Tags: testTags}, + expectedErr: false, + }, + { + desc: "disk Id and no error shall be returned if everything is good with PremiumV2LRS storage account", + diskID: disk1ID, + diskName: disk1Name, + storageAccountType: armcompute.DiskStorageAccountTypesPremiumV2LRS, + diskIOPSReadWrite: "", + diskMBPSReadWrite: "", + diskEncryptionSetID: goodDiskEncryptionSetID, + expectedDiskID: disk1ID, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + expectedErr: false, + }, + { + desc: "disk Id and no error shall be returned if everything is good with DiskStorageAccountTypesStandardLRS storage account", + diskID: disk1ID, + diskName: disk1Name, + storageAccountType: armcompute.DiskStorageAccountTypesStandardLRS, + diskIOPSReadWrite: "", + diskMBPSReadWrite: "", + diskEncryptionSetID: goodDiskEncryptionSetID, + expectedDiskID: disk1ID, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + expectedErr: false, + }, + { + desc: "empty diskid and an error shall be returned if everything is good with DiskStorageAccountTypesUltraSSDLRS storage account but DiskIOPSReadWrite is invalid", + diskID: disk1ID, + diskName: disk1Name, + storageAccountType: armcompute.DiskStorageAccountTypesUltraSSDLRS, + diskIOPSReadWrite: "invalid", + diskMBPSReadWrite: "100", + diskEncryptionSetID: goodDiskEncryptionSetID, + expectedDiskID: "", + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + expectedErr: true, + expectedErrMsg: fmt.Errorf("AzureDisk - failed to parse DiskIOPSReadWrite: strconv.Atoi: parsing \"invalid\": invalid syntax"), + }, + { + desc: "empty diskid and an error shall be returned if everything is good with DiskStorageAccountTypesUltraSSDLRS storage account but DiskMBPSReadWrite is invalid", + diskID: disk1ID, + diskName: disk1Name, + storageAccountType: armcompute.DiskStorageAccountTypesUltraSSDLRS, + diskIOPSReadWrite: "100", + diskMBPSReadWrite: "invalid", + diskEncryptionSetID: goodDiskEncryptionSetID, + expectedDiskID: "", + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + expectedErr: true, + expectedErrMsg: fmt.Errorf("AzureDisk - failed to parse DiskMBpsReadWrite: strconv.Atoi: parsing \"invalid\": invalid syntax"), + }, + { + desc: "empty diskid and an error shall be returned if everything is good with DiskStorageAccountTypesUltraSSDLRS storage account with bad Disk 
EncryptionSetID", + diskID: disk1ID, + diskName: disk1Name, + storageAccountType: armcompute.DiskStorageAccountTypesUltraSSDLRS, + diskIOPSReadWrite: "100", + diskMBPSReadWrite: "100", + diskEncryptionSetID: badDiskEncryptionSetID, + expectedDiskID: "", + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + expectedErr: true, + expectedErrMsg: fmt.Errorf("AzureDisk - format of DiskEncryptionSetID(%s) is incorrect, correct format: %s", badDiskEncryptionSetID, consts.DiskEncryptionSetIDFormat), + }, + { + desc: "DiskEncryptionType should be empty when DiskEncryptionSetID is not set", + diskID: disk1ID, + diskName: disk1Name, + storageAccountType: armcompute.DiskStorageAccountTypesStandardLRS, + diskEncryptionSetID: "", + diskEncryptionType: "EncryptionAtRestWithCustomerKey", + expectedDiskID: "", + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + expectedErr: true, + expectedErrMsg: fmt.Errorf("AzureDisk - DiskEncryptionType(EncryptionAtRestWithCustomerKey) should be empty when DiskEncryptionSetID is not set"), + }, + { + desc: "disk Id and no error shall be returned if everything is good with DiskStorageAccountTypesStandardLRS storage account with not empty diskIOPSReadWrite", + diskID: disk1ID, + diskName: disk1Name, + storageAccountType: armcompute.DiskStorageAccountTypesStandardLRS, + diskIOPSReadWrite: "100", + diskMBPSReadWrite: "", + diskEncryptionSetID: goodDiskEncryptionSetID, + expectedDiskID: "", + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + expectedErr: true, + expectedErrMsg: fmt.Errorf("AzureDisk - DiskIOPSReadWrite parameter is only applicable in UltraSSD_LRS disk type"), + }, + { + desc: "disk Id and no error shall be returned if everything is good with DiskStorageAccountTypesStandardLRS storage account with not empty diskMBPSReadWrite", + diskID: disk1ID, + diskName: disk1Name, + storageAccountType: armcompute.DiskStorageAccountTypesStandardLRS, + diskIOPSReadWrite: "", + diskMBPSReadWrite: "100", + diskEncryptionSetID: goodDiskEncryptionSetID, + expectedDiskID: "", + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + expectedErr: true, + expectedErrMsg: fmt.Errorf("AzureDisk - DiskMBpsReadWrite parameter is only applicable in UltraSSD_LRS disk type"), + }, + { + desc: "correct NetworkAccessPolicy(DenyAll) setting", + diskID: disk1ID, + diskName: disk1Name, + storageAccountType: 
armcompute.DiskStorageAccountTypesStandardLRS, + diskEncryptionSetID: goodDiskEncryptionSetID, + networkAccessPolicy: armcompute.NetworkAccessPolicyDenyAll, + publicNetworkAccess: armcompute.PublicNetworkAccessDisabled, + expectedDiskID: disk1ID, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + expectedErr: false, + }, + { + desc: "correct NetworkAccessPolicy(AllowAll) setting", + diskID: disk1ID, + diskName: disk1Name, + storageAccountType: armcompute.DiskStorageAccountTypesStandardLRS, + diskEncryptionSetID: goodDiskEncryptionSetID, + diskEncryptionType: "EncryptionAtRestWithCustomerKey", + networkAccessPolicy: armcompute.NetworkAccessPolicyAllowAll, + publicNetworkAccess: armcompute.PublicNetworkAccessEnabled, + expectedDiskID: disk1ID, + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + expectedErr: false, + }, + { + desc: "DiskAccessID should not be empty when NetworkAccessPolicy is AllowPrivate", + diskID: disk1ID, + diskName: disk1Name, + storageAccountType: armcompute.DiskStorageAccountTypesStandardLRS, + diskEncryptionSetID: goodDiskEncryptionSetID, + networkAccessPolicy: armcompute.NetworkAccessPolicyAllowPrivate, + expectedDiskID: "", + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + expectedErr: true, + expectedErrMsg: fmt.Errorf("DiskAccessID should not be empty when NetworkAccessPolicy is AllowPrivate"), + }, + { + desc: "DiskAccessID(%s) must be empty when NetworkAccessPolicy(%s) is not AllowPrivate", + diskID: disk1ID, + diskName: disk1Name, + storageAccountType: armcompute.DiskStorageAccountTypesStandardLRS, + diskEncryptionSetID: goodDiskEncryptionSetID, + networkAccessPolicy: armcompute.NetworkAccessPolicyAllowAll, + diskAccessID: pointer.String("diskAccessID"), + expectedDiskID: "", + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + expectedErr: true, + expectedErrMsg: fmt.Errorf("DiskAccessID(diskAccessID) must be empty when NetworkAccessPolicy(AllowAll) is not AllowPrivate"), + }, + { + desc: "resourceGroup must be specified when subscriptionID is not empty", + diskID: "", + diskName: disk1Name, + subscriptionID: "abc", + resouceGroup: "", + expectedDiskID: "", + existedDisk: &armcompute.Disk{ID: pointer.String(disk1ID), Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{Encryption: &armcompute.Encryption{DiskEncryptionSetID: 
&goodDiskEncryptionSetID, Type: to.Ptr(armcompute.EncryptionTypeEncryptionAtRestWithCustomerKey)}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags}, + expectedErr: true, + expectedErrMsg: fmt.Errorf("resourceGroup must be specified when subscriptionID(abc) is not empty"), + }, + } + + for i, test := range testCases { + testCloud := provider.GetTestCloud(ctrl) + + common := &controllerCommon{ + cloud: testCloud, + lockMap: newLockMap(), + AttachDetachInitialDelayInMs: defaultAttachDetachInitialDelayInMs, + clientFactory: testCloud.ComputeClientFactory, + } + + managedDiskController := &ManagedDiskController{common} + volumeOptions := &ManagedDiskOptions{ + DiskName: test.diskName, + StorageAccountType: test.storageAccountType, + ResourceGroup: test.resouceGroup, + SizeGB: 1, + Tags: map[string]string{"tag1": "azure-tag1"}, + AvailabilityZone: "westus-testzone", + DiskIOPSReadWrite: test.diskIOPSReadWrite, + DiskMBpsReadWrite: test.diskMBPSReadWrite, + DiskEncryptionSetID: test.diskEncryptionSetID, + DiskEncryptionType: test.diskEncryptionType, + MaxShares: maxShare, + NetworkAccessPolicy: test.networkAccessPolicy, + PublicNetworkAccess: test.publicNetworkAccess, + DiskAccessID: test.diskAccessID, + SubscriptionID: test.subscriptionID, + } + + mockDisksClient := mock_diskclient.NewMockInterface(ctrl) + common.clientFactory.(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(testCloud.SubscriptionID).Return(mockDisksClient, nil).AnyTimes() + //disk := getTestDisk(test.diskName) + mockDisksClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), test.diskName, gomock.Any()).Return(test.existedDisk, nil).AnyTimes() + mockDisksClient.EXPECT().Get(gomock.Any(), gomock.Any(), test.diskName).Return(test.existedDisk, nil).AnyTimes() + + actualDiskID, err := managedDiskController.CreateManagedDisk(ctx, volumeOptions) + assert.Equal(t, test.expectedDiskID, actualDiskID, "TestCase[%d]: %s", i, test.desc) + assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err) + if test.expectedErr { + assert.EqualError(t, test.expectedErrMsg, err.Error(), "TestCase[%d]: %s, expected error: %v, return error: %v", i, test.desc, test.expectedErrMsg, err) + } + } +} + +func TestCreateManagedDiskWithExtendedLocation(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testCloud := provider.GetTestCloudWithExtendedLocation(ctrl) + diskName := disk1Name + expectedDiskID := disk1ID + el := &armcompute.ExtendedLocation{ + Name: pointer.String("microsoftlosangeles1"), + Type: to.Ptr(armcompute.ExtendedLocationTypesEdgeZone), + } + + diskreturned := armcompute.Disk{ + ID: pointer.String(expectedDiskID), + Name: pointer.String(diskName), + ExtendedLocation: el, + Properties: &armcompute.DiskProperties{ + ProvisioningState: pointer.String("Succeeded"), + }, + } + + common := &controllerCommon{ + cloud: testCloud, + lockMap: newLockMap(), + AttachDetachInitialDelayInMs: defaultAttachDetachInitialDelayInMs, + clientFactory: testCloud.ComputeClientFactory, + } + + managedDiskController := &ManagedDiskController{common} + volumeOptions := &ManagedDiskOptions{ + DiskName: diskName, + StorageAccountType: armcompute.DiskStorageAccountTypes(armcompute.EdgeZoneStorageAccountTypePremiumLRS), + ResourceGroup: "", + SizeGB: 1, + AvailabilityZone: "westus-testzone", + } + + mockDisksClient := mock_diskclient.NewMockInterface(ctrl) + 
common.clientFactory.(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(testCloud.SubscriptionID).Return(mockDisksClient, nil).AnyTimes() + mockDisksClient.EXPECT().CreateOrUpdate(gomock.Any(), testCloud.ResourceGroup, diskName, gomock.Any()). + Do(func(ctx interface{}, rg, dn string, disk armcompute.Disk) { + assert.Equal(t, el.Name, disk.ExtendedLocation.Name, "The extended location name should match.") + assert.Equal(t, el.Type, disk.ExtendedLocation.Type, "The extended location type should match.") + }).Return(to.Ptr(diskreturned), nil) + + mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, diskName).Return(&diskreturned, nil).AnyTimes() + + actualDiskID, err := managedDiskController.CreateManagedDisk(ctx, volumeOptions) + assert.Equal(t, expectedDiskID, actualDiskID, "Disk ID does not match.") + assert.Nil(t, err, "There should not be an error.") +} + +func TestDeleteManagedDisk(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + testCases := []struct { + desc string + diskName string + diskState string + existedDisk *armcompute.Disk + expectedErr bool + expectedErrMsg error + }{ + { + desc: "an error shall be returned if delete an attaching disk", + diskName: disk1Name, + diskState: "attaching", + existedDisk: &armcompute.Disk{Name: pointer.String(disk1Name)}, + expectedErr: true, + expectedErrMsg: fmt.Errorf("failed to delete disk(/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/disks/disk1) since it's in attaching state"), + }, + { + desc: "no error shall be returned if everything is good", + diskName: disk1Name, + existedDisk: &armcompute.Disk{Name: pointer.String(disk1Name)}, + expectedErr: false, + }, + { + desc: "an error shall be returned if get disk failed", + diskName: fakeGetDiskFailed, + existedDisk: &armcompute.Disk{Name: pointer.String(fakeGetDiskFailed)}, + expectedErr: true, + expectedErrMsg: fmt.Errorf("Get Disk failed"), + }, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + for i, test := range testCases { + testCloud := provider.GetTestCloud(ctrl) + + common := &controllerCommon{ + cloud: testCloud, + lockMap: newLockMap(), + AttachDetachInitialDelayInMs: defaultAttachDetachInitialDelayInMs, + clientFactory: testCloud.ComputeClientFactory, + } + + managedDiskController := &ManagedDiskController{common} + diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s", + testCloud.SubscriptionID, testCloud.ResourceGroup, *test.existedDisk.Name) + if test.diskState == "attaching" { + managedDiskController.diskStateMap.Store(strings.ToLower(diskURI), test.diskState) + } + + mockDisksClient := mock_diskclient.NewMockInterface(ctrl) + common.clientFactory.(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(testCloud.SubscriptionID).Return(mockDisksClient, nil).AnyTimes() + if test.diskName == fakeGetDiskFailed { + mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, fmt.Errorf("Get Disk failed")).AnyTimes() + } else { + mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, nil).AnyTimes() + } + mockDisksClient.EXPECT().Delete(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(nil).AnyTimes() + + err := managedDiskController.DeleteManagedDisk(ctx, diskURI) + assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err) + if test.expectedErr { + 
assert.EqualError(t, test.expectedErrMsg, err.Error(), "TestCase[%d]: %s, expected: %v, return: %v", i, test.desc, test.expectedErrMsg, err) + } + } +} + +func TestGetDisk(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testCases := []struct { + desc string + diskName string + existedDisk *armcompute.Disk + expectedErr bool + expectedErrMsg error + expectedProvisioningState string + expectedDiskID string + }{ + { + desc: "no error shall be returned if get a normal disk without DiskProperties", + diskName: disk1Name, + existedDisk: &armcompute.Disk{Name: pointer.String(disk1Name)}, + expectedErr: false, + expectedProvisioningState: "", + expectedDiskID: "", + }, + { + desc: "an error shall be returned if get disk failed", + diskName: fakeGetDiskFailed, + existedDisk: &armcompute.Disk{Name: pointer.String(fakeGetDiskFailed)}, + expectedErr: true, + expectedErrMsg: fmt.Errorf("Get Disk failed"), + expectedProvisioningState: "", + expectedDiskID: "", + }, + } + + for i, test := range testCases { + testCloud := provider.GetTestCloud(ctrl) + managedDiskController := &ManagedDiskController{ + controllerCommon: &controllerCommon{ + cloud: testCloud, + lockMap: newLockMap(), + DisableDiskLunCheck: true, + clientFactory: testCloud.ComputeClientFactory, + }, + } + + mockDisksClient := mock_diskclient.NewMockInterface(ctrl) + managedDiskController.controllerCommon.clientFactory.(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub("").Return(mockDisksClient, nil).AnyTimes() + if test.diskName == fakeGetDiskFailed { + mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, fmt.Errorf("Get Disk failed")).AnyTimes() + } else { + mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, nil).AnyTimes() + } + + provisioningState, diskid, err := managedDiskController.GetDisk(ctx, "", testCloud.ResourceGroup, test.diskName) + assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err) + if test.expectedErr { + assert.EqualError(t, test.expectedErrMsg, err.Error(), "TestCase[%d]: %s, expected: %v, return: %v", i, test.desc, test.expectedErrMsg, err) + } + assert.Equal(t, test.expectedProvisioningState, provisioningState, "TestCase[%d]: %s, expected ProvisioningState: %v, return ProvisioningState: %v", i, test.desc, test.expectedProvisioningState, provisioningState) + assert.Equal(t, test.expectedDiskID, diskid, "TestCase[%d]: %s, expected DiskID: %v, return DiskID: %v", i, test.desc, test.expectedDiskID, diskid) + } +} + +func TestResizeDisk(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + diskSizeGB := int32(2) + diskName := disk1Name + fakeCreateDiskFailed := "fakeCreateDiskFailed" + testCases := []struct { + desc string + diskName string + oldSize resource.Quantity + newSize resource.Quantity + existedDisk *armcompute.Disk + expectedQuantity resource.Quantity + expectedErr bool + expectedErrMsg error + }{ + { + desc: "new quantity and no error shall be returned if everything is good", + diskName: diskName, + oldSize: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), + newSize: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI), + existedDisk: &armcompute.Disk{Name: pointer.String(disk1Name), Properties: 
&armcompute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: to.Ptr(armcompute.DiskStateUnattached)}}, + expectedQuantity: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI), + expectedErr: false, + }, + { + desc: "new quantity and no error shall be returned if everything is good with DiskProperties is null", + diskName: diskName, + oldSize: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), + newSize: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI), + existedDisk: &armcompute.Disk{Name: pointer.String(disk1Name)}, + expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), + expectedErr: true, + expectedErrMsg: fmt.Errorf("DiskProperties of disk(%s) is nil", diskName), + }, + { + desc: "new quantity and no error shall be returned if everything is good with disk already of greater or equal size than requested", + diskName: diskName, + oldSize: *resource.NewQuantity(1*(1024*1024*1024), resource.BinarySI), + newSize: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), + existedDisk: &armcompute.Disk{Name: pointer.String(disk1Name), Properties: &armcompute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: to.Ptr(armcompute.DiskStateUnattached)}}, + expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), + expectedErr: false, + }, + { + desc: "an error shall be returned if everything is good but get disk failed", + diskName: fakeGetDiskFailed, + oldSize: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), + newSize: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI), + existedDisk: &armcompute.Disk{Name: pointer.String(fakeGetDiskFailed), Properties: &armcompute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: to.Ptr(armcompute.DiskStateUnattached)}}, + expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), + expectedErr: true, + expectedErrMsg: fmt.Errorf("Get Disk failed"), + }, + { + desc: "an error shall be returned if everything is good but create disk failed", + diskName: fakeCreateDiskFailed, + oldSize: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), + newSize: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI), + existedDisk: &armcompute.Disk{Name: pointer.String(fakeCreateDiskFailed), Properties: &armcompute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: to.Ptr(armcompute.DiskStateUnattached)}}, + expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), + expectedErr: true, + expectedErrMsg: fmt.Errorf("Create Disk failed"), + }, + { + desc: "an error shall be returned if disk is not in Unattached state", + diskName: fakeCreateDiskFailed, + oldSize: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), + newSize: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI), + existedDisk: &armcompute.Disk{Name: pointer.String(fakeCreateDiskFailed), Properties: &armcompute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: to.Ptr(armcompute.DiskStateAttached)}}, + expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), + expectedErr: true, + expectedErrMsg: fmt.Errorf("azureDisk - disk resize is only supported on Unattached disk, current disk state: Attached, already attached to "), + }, + } + + for i, test := range testCases { + testCloud := provider.GetTestCloud(ctrl) + managedDiskController := &ManagedDiskController{ + controllerCommon: &controllerCommon{ + cloud: testCloud, + lockMap: newLockMap(), + DisableDiskLunCheck: true, + clientFactory: 
testCloud.ComputeClientFactory, + }, + } + diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s", + testCloud.SubscriptionID, testCloud.ResourceGroup, *test.existedDisk.Name) + + mockDisksClient := mock_diskclient.NewMockInterface(ctrl) + managedDiskController.controllerCommon.clientFactory.(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(testCloud.SubscriptionID).Return(mockDisksClient, nil).AnyTimes() + if test.diskName == fakeGetDiskFailed { + mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, fmt.Errorf("Get Disk failed")).AnyTimes() + } else { + mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, nil).AnyTimes() + } + if test.diskName == fakeCreateDiskFailed { + mockDisksClient.EXPECT().Patch(gomock.Any(), testCloud.ResourceGroup, test.diskName, gomock.Any()).Return(test.existedDisk, fmt.Errorf("Create Disk failed")).AnyTimes() + } else { + mockDisksClient.EXPECT().Patch(gomock.Any(), testCloud.ResourceGroup, test.diskName, gomock.Any()).Return(test.existedDisk, nil).AnyTimes() + } + + result, err := managedDiskController.ResizeDisk(ctx, diskURI, test.oldSize, test.newSize, false) + assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err) + if test.expectedErr { + assert.EqualError(t, test.expectedErrMsg, err.Error(), "TestCase[%d]: %s, expected: %v, return: %v", i, test.desc, test.expectedErrMsg, err) + } + assert.Equal(t, test.expectedQuantity.Value(), result.Value(), "TestCase[%d]: %s, expected Quantity: %v, return Quantity: %v", i, test.desc, test.expectedQuantity, result) + } +} diff --git a/pkg/azuredisk/azuredisk.go b/pkg/azuredisk/azuredisk.go index c6f1805c1..5f0aaae54 100644 --- a/pkg/azuredisk/azuredisk.go +++ b/pkg/azuredisk/azuredisk.go @@ -36,26 +36,32 @@ package azuredisk import ( "context" + "errors" "fmt" "reflect" "strconv" "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/container-storage-interface/spec/lib/go/csi" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "github.com/edgelesssys/constellation/v2/csi/cryptmapper" cryptKms "github.com/edgelesssys/constellation/v2/csi/kms" + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/volume/util/hostutil" "k8s.io/mount-utils" + "k8s.io/utils/pointer" consts "sigs.k8s.io/azuredisk-csi-driver/pkg/azureconstants" "sigs.k8s.io/azuredisk-csi-driver/pkg/azureutils" @@ -64,49 +70,19 @@ import ( "sigs.k8s.io/azuredisk-csi-driver/pkg/optimization" "sigs.k8s.io/azuredisk-csi-driver/pkg/util" volumehelper "sigs.k8s.io/azuredisk-csi-driver/pkg/util" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" azurecloudconsts "sigs.k8s.io/cloud-provider-azure/pkg/consts" azure "sigs.k8s.io/cloud-provider-azure/pkg/provider" ) -// DriverOptions defines driver parameters specified in driver deployment -type DriverOptions struct { - NodeID string - DriverName string - VolumeAttachLimit int64 - EnablePerfOptimization bool - CloudConfigSecretName string - CloudConfigSecretNamespace string - 
CustomUserAgent string - UserAgentSuffix string - UseCSIProxyGAInterface bool - EnableDiskOnlineResize bool - AllowEmptyCloudConfig bool - EnableAsyncAttach bool - EnableListVolumes bool - EnableListSnapshots bool - SupportZone bool - GetNodeInfoFromLabels bool - EnableDiskCapacityCheck bool - DisableUpdateCache bool - EnableTrafficManager bool - TrafficManagerPort int64 - AttachDetachInitialDelayInMs int64 - VMSSCacheTTLInSeconds int64 - VMType string - EnableWindowsHostProcess bool - GetNodeIDFromIMDS bool - EnableOtelTracing bool - KMSAddr string -} - // CSIDriver defines the interface for a CSI driver. type CSIDriver interface { csi.ControllerServer csi.NodeServer csi.IdentityServer - Run(endpoint, kubeconfig string, disableAVSetNodes, testMode bool) + Run(ctx context.Context) error } type hostUtil interface { @@ -128,8 +104,9 @@ type DriverCore struct { cloudConfigSecretNamespace string customUserAgent string userAgentSuffix string - kubeconfig string cloud *azure.Cloud + clientFactory azclient.ClientFactory + diskController *ManagedDiskController mounter *mount.SafeFormatAndMount deviceHelper optimization.Interface nodeInfo *optimization.NodeInfo @@ -138,7 +115,6 @@ type DriverCore struct { useCSIProxyGAInterface bool enableDiskOnlineResize bool allowEmptyCloudConfig bool - enableAsyncAttach bool enableListVolumes bool enableListSnapshots bool supportZone bool @@ -148,21 +124,34 @@ type DriverCore struct { enableTrafficManager bool trafficManagerPort int64 vmssCacheTTLInSeconds int64 + volStatsCacheExpireInMinutes int64 attachDetachInitialDelayInMs int64 vmType string enableWindowsHostProcess bool getNodeIDFromIMDS bool enableOtelTracing bool - getVolumeName func(string) (string, error) - cryptMapper cryptMapper + shouldWaitForSnapshotReady bool + checkDiskLUNCollision bool + forceDetachBackoff bool + endpoint string + disableAVSetNodes bool + kubeClient kubernetes.Interface + // a timed cache storing volume stats + volStatsCache azcache.Resource + + // [Edgeless] Constellation fields + getVolumeName func(string) (string, error) + cryptMapper cryptMapper } // Driver is the v1 implementation of the Azure Disk CSI Driver. type Driver struct { DriverCore volumeLocks *volumehelper.VolumeLocks - // a timed cache GetDisk throttling - getDiskThrottlingCache azcache.Resource + // a timed cache for throttling + throttlingCache azcache.Resource + // a timed cache for disk lun collision check throttling + checkDiskLunThrottlingCache azcache.Resource } // newDriverV1 Creates a NewCSIDriver object. 
Assumes vendor version is equal to driver version & @@ -181,7 +170,6 @@ func newDriverV1(options *DriverOptions) *Driver { driver.useCSIProxyGAInterface = options.UseCSIProxyGAInterface driver.enableDiskOnlineResize = options.EnableDiskOnlineResize driver.allowEmptyCloudConfig = options.AllowEmptyCloudConfig - driver.enableAsyncAttach = options.EnableAsyncAttach driver.enableListVolumes = options.EnableListVolumes driver.enableListSnapshots = options.EnableListVolumes driver.supportZone = options.SupportZone @@ -192,13 +180,23 @@ func newDriverV1(options *DriverOptions) *Driver { driver.enableTrafficManager = options.EnableTrafficManager driver.trafficManagerPort = options.TrafficManagerPort driver.vmssCacheTTLInSeconds = options.VMSSCacheTTLInSeconds + driver.volStatsCacheExpireInMinutes = options.VolStatsCacheExpireInMinutes driver.vmType = options.VMType driver.enableWindowsHostProcess = options.EnableWindowsHostProcess driver.getNodeIDFromIMDS = options.GetNodeIDFromIMDS driver.enableOtelTracing = options.EnableOtelTracing + driver.shouldWaitForSnapshotReady = options.WaitForSnapshotReady + driver.checkDiskLUNCollision = options.CheckDiskLUNCollision + driver.forceDetachBackoff = options.ForceDetachBackoff + driver.endpoint = options.Endpoint + driver.disableAVSetNodes = options.DisableAVSetNodes driver.volumeLocks = volumehelper.NewVolumeLocks() driver.ioHandler = azureutils.NewOSIOHandler() driver.hostUtil = hostutil.NewHostUtil() + if driver.NodeID == "" { + // nodeid is not needed in controller component + klog.Warning("nodeid is empty") + } // [Edgeless] set up dm-crypt driver.getVolumeName = util.GetVolumeName @@ -206,81 +204,84 @@ func newDriverV1(options *DriverOptions) *Driver { topologyKey = fmt.Sprintf("topology.%s/zone", driver.Name) - cache, err := azcache.NewTimedCache(5*time.Minute, func(key string) (interface{}, error) { - return nil, nil - }, false) - if err != nil { + getter := func(key string) (interface{}, error) { return nil, nil } + var err error + if driver.throttlingCache, err = azcache.NewTimedCache(5*time.Minute, getter, false); err != nil { + klog.Fatalf("%v", err) + } + if driver.checkDiskLunThrottlingCache, err = azcache.NewTimedCache(30*time.Minute, getter, false); err != nil { klog.Fatalf("%v", err) } - driver.getDiskThrottlingCache = cache - return &driver -} -// Run driver initialization -func (d *Driver) Run(endpoint, kubeconfig string, disableAVSetNodes, testingMock bool) { - versionMeta, err := GetVersionYAML(d.Name) - if err != nil { + if options.VolStatsCacheExpireInMinutes <= 0 { + options.VolStatsCacheExpireInMinutes = 10 // default expire in 10 minutes + } + if driver.volStatsCache, err = azcache.NewTimedCache(time.Duration(options.VolStatsCacheExpireInMinutes)*time.Minute, getter, false); err != nil { klog.Fatalf("%v", err) } - klog.Infof("\nDRIVER INFORMATION:\n-------------------\n%s\n\nStreaming logs below:", versionMeta) - userAgent := GetUserAgent(d.Name, d.customUserAgent, d.userAgentSuffix) + userAgent := GetUserAgent(driver.Name, driver.customUserAgent, driver.userAgentSuffix) klog.V(2).Infof("driver userAgent: %s", userAgent) - cloud, err := azureutils.GetCloudProvider(context.Background(), kubeconfig, d.cloudConfigSecretName, d.cloudConfigSecretNamespace, - userAgent, d.allowEmptyCloudConfig, d.enableTrafficManager, d.trafficManagerPort) + kubeClient, err := azureutils.GetKubeClient(options.Kubeconfig) if err != nil { - klog.Fatalf("failed to get Azure Cloud Provider, error: %v", err) + klog.Warningf("get kubeconfig(%s) failed with 
error: %v", options.Kubeconfig, err) } - d.cloud = cloud - d.kubeconfig = kubeconfig + driver.kubeClient = kubeClient - if d.cloud != nil { - if d.vmType != "" { - klog.V(2).Infof("override VMType(%s) in cloud config as %s", d.cloud.VMType, d.vmType) - d.cloud.VMType = d.vmType + cloud, err := azureutils.GetCloudProviderFromClient(context.Background(), kubeClient, driver.cloudConfigSecretName, driver.cloudConfigSecretNamespace, + userAgent, driver.allowEmptyCloudConfig, driver.enableTrafficManager, driver.trafficManagerPort) + if err != nil { + klog.Fatalf("failed to get Azure Cloud Provider, error: %v", err) + } + driver.cloud = cloud + + if driver.cloud != nil { + driver.diskController = NewManagedDiskController(driver.cloud) + driver.diskController.DisableUpdateCache = driver.disableUpdateCache + driver.diskController.AttachDetachInitialDelayInMs = int(driver.attachDetachInitialDelayInMs) + driver.diskController.ForceDetachBackoff = driver.forceDetachBackoff + driver.clientFactory = driver.cloud.ComputeClientFactory + if driver.vmType != "" { + klog.V(2).Infof("override VMType(%s) in cloud config as %s", driver.cloud.VMType, driver.vmType) + driver.cloud.VMType = driver.vmType } - if d.NodeID == "" { + if driver.NodeID == "" { // Disable UseInstanceMetadata for controller to mitigate a timeout issue using IMDS // https://github.com/kubernetes-sigs/azuredisk-csi-driver/issues/168 klog.V(2).Infof("disable UseInstanceMetadata for controller") - d.cloud.Config.UseInstanceMetadata = false + driver.cloud.Config.UseInstanceMetadata = false - if d.cloud.VMType == azurecloudconsts.VMTypeStandard && d.cloud.DisableAvailabilitySetNodes { - klog.V(2).Infof("set DisableAvailabilitySetNodes as false since VMType is %s", d.cloud.VMType) - d.cloud.DisableAvailabilitySetNodes = false + if driver.cloud.VMType == azurecloudconsts.VMTypeStandard && driver.cloud.DisableAvailabilitySetNodes { + klog.V(2).Infof("set DisableAvailabilitySetNodes as false since VMType is %s", driver.cloud.VMType) + driver.cloud.DisableAvailabilitySetNodes = false } - if d.cloud.VMType == azurecloudconsts.VMTypeVMSS && !d.cloud.DisableAvailabilitySetNodes && disableAVSetNodes { + if driver.cloud.VMType == azurecloudconsts.VMTypeVMSS && !driver.cloud.DisableAvailabilitySetNodes && driver.disableAVSetNodes { klog.V(2).Infof("DisableAvailabilitySetNodes for controller since current VMType is vmss") - d.cloud.DisableAvailabilitySetNodes = true + driver.cloud.DisableAvailabilitySetNodes = true } - klog.V(2).Infof("cloud: %s, location: %s, rg: %s, VMType: %s, PrimaryScaleSetName: %s, PrimaryAvailabilitySetName: %s, DisableAvailabilitySetNodes: %v", d.cloud.Cloud, d.cloud.Location, d.cloud.ResourceGroup, d.cloud.VMType, d.cloud.PrimaryScaleSetName, d.cloud.PrimaryAvailabilitySetName, d.cloud.DisableAvailabilitySetNodes) + klog.V(2).Infof("cloud: %s, location: %s, rg: %s, VMType: %s, PrimaryScaleSetName: %s, PrimaryAvailabilitySetName: %s, DisableAvailabilitySetNodes: %v", driver.cloud.Cloud, driver.cloud.Location, driver.cloud.ResourceGroup, driver.cloud.VMType, driver.cloud.PrimaryScaleSetName, driver.cloud.PrimaryAvailabilitySetName, driver.cloud.DisableAvailabilitySetNodes) } - if d.vmssCacheTTLInSeconds > 0 { - klog.V(2).Infof("reset vmssCacheTTLInSeconds as %d", d.vmssCacheTTLInSeconds) - d.cloud.VMCacheTTLInSeconds = int(d.vmssCacheTTLInSeconds) - d.cloud.VmssCacheTTLInSeconds = int(d.vmssCacheTTLInSeconds) - } - - if d.cloud.ManagedDiskController != nil { - d.cloud.DisableUpdateCache = d.disableUpdateCache - 
d.cloud.AttachDetachInitialDelayInMs = int(d.attachDetachInitialDelayInMs) + if driver.vmssCacheTTLInSeconds > 0 { + klog.V(2).Infof("reset vmssCacheTTLInSeconds as %d", driver.vmssCacheTTLInSeconds) + driver.cloud.VMCacheTTLInSeconds = int(driver.vmssCacheTTLInSeconds) + driver.cloud.VmssCacheTTLInSeconds = int(driver.vmssCacheTTLInSeconds) } } - d.deviceHelper = optimization.NewSafeDeviceHelper() + driver.deviceHelper = optimization.NewSafeDeviceHelper() - if d.getPerfOptimizationEnabled() { - d.nodeInfo, err = optimization.NewNodeInfo(context.TODO(), d.getCloud(), d.NodeID) + if driver.getPerfOptimizationEnabled() { + driver.nodeInfo, err = optimization.NewNodeInfo(context.TODO(), driver.getCloud(), driver.NodeID) if err != nil { klog.Warningf("Failed to get node info. Error: %v", err) } } - d.mounter, err = mounter.NewSafeMounter(d.enableWindowsHostProcess, d.useCSIProxyGAInterface) + driver.mounter, err = mounter.NewSafeMounter(driver.enableWindowsHostProcess, driver.useCSIProxyGAInterface) if err != nil { klog.Fatalf("Failed to get safe mounter. Error: %v", err) } @@ -292,42 +293,96 @@ func (d *Driver) Run(endpoint, kubeconfig string, disableAVSetNodes, testingMock csi.ControllerServiceCapability_RPC_CLONE_VOLUME, csi.ControllerServiceCapability_RPC_EXPAND_VOLUME, } - if d.enableListVolumes { + if driver.enableListVolumes { controllerCap = append(controllerCap, csi.ControllerServiceCapability_RPC_LIST_VOLUMES, csi.ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES) } - if d.enableListSnapshots { + if driver.enableListSnapshots { controllerCap = append(controllerCap, csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS) } - d.AddControllerServiceCapabilities(controllerCap) - d.AddVolumeCapabilityAccessModes( + driver.AddControllerServiceCapabilities(controllerCap) + driver.AddVolumeCapabilityAccessModes( []csi.VolumeCapability_AccessMode_Mode{ csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY, csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER, }) - d.AddNodeServiceCapabilities([]csi.NodeServiceCapability_RPC_Type{ + driver.AddNodeServiceCapabilities([]csi.NodeServiceCapability_RPC_Type{ csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME, csi.NodeServiceCapability_RPC_EXPAND_VOLUME, csi.NodeServiceCapability_RPC_GET_VOLUME_STATS, }) + return &driver +} + +// Run driver initialization +func (d *Driver) Run(ctx context.Context) error { + versionMeta, err := GetVersionYAML(d.Name) + if err != nil { + klog.Fatalf("%v", err) + } + klog.Infof("\nDRIVER INFORMATION:\n-------------------\n%s\n\nStreaming logs below:", versionMeta) - s := csicommon.NewNonBlockingGRPCServer() + grpcInterceptor := grpc.UnaryInterceptor(csicommon.LogGRPC) + opts := []grpc.ServerOption{ + grpcInterceptor, + } + if d.enableOtelTracing { + exporter, err := InitOtelTracing() + if err != nil { + klog.Fatalf("Failed to initialize otel tracing: %v", err) + } + // Exporter will flush traces on shutdown + defer func() { + if err := exporter.Shutdown(context.Background()); err != nil { + klog.Errorf("Could not shutdown otel exporter: %v", err) + } + }() + opts = append(opts, grpc.StatsHandler(otelgrpc.NewServerHandler())) + } + + s := grpc.NewServer(opts...) 
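The server constructed above is torn down by cancelling the context handed to Run. A minimal caller sketch of that lifecycle follows; the exported NewDriver constructor and the signal wiring are assumptions for illustration, not shown in this diff:

    package main

    import (
        "context"
        "os/signal"
        "syscall"

        "k8s.io/klog/v2"
        "sigs.k8s.io/azuredisk-csi-driver/pkg/azuredisk"
    )

    func main() {
        options := &azuredisk.DriverOptions{
            DriverName:            "disk.csi.azure.com",   // mirrors consts.DefaultDriverName
            Endpoint:              "unix://tmp/csi.sock",  // endpoint is now a driver option instead of a Run argument
            AllowEmptyCloudConfig: true,
        }

        // Cancelling this context triggers the GracefulStop goroutine registered in Run.
        ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
        defer stop()

        driver := azuredisk.NewDriver(options) // assumed exported wrapper around newDriverV1/newDriverV2
        if err := driver.Run(ctx); err != nil {
            klog.Fatalf("driver exited with error: %v", err)
        }
    }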
+ csi.RegisterIdentityServer(s, d) + csi.RegisterControllerServer(s, d) + csi.RegisterNodeServer(s, d) + + go func() { + //graceful shutdown + <-ctx.Done() + s.GracefulStop() + }() // Driver d act as IdentityServer, ControllerServer and NodeServer - s.Start(endpoint, d, d, d, testingMock, d.enableOtelTracing) - s.Wait() + listener, err := csicommon.Listen(ctx, d.endpoint) + if err != nil { + klog.Fatalf("failed to listen to endpoint, error: %v", err) + } + err = s.Serve(listener) + if errors.Is(err, grpc.ErrServerStopped) { + klog.Infof("gRPC server stopped serving") + return nil + } + return err } func (d *Driver) isGetDiskThrottled() bool { - cache, err := d.getDiskThrottlingCache.Get(consts.ThrottlingKey, azcache.CacheReadTypeDefault) + cache, err := d.throttlingCache.Get(consts.GetDiskThrottlingKey, azcache.CacheReadTypeDefault) if err != nil { - klog.Warningf("getDiskThrottlingCache(%s) return with error: %s", consts.ThrottlingKey, err) + klog.Warningf("throttlingCache(%s) return with error: %s", consts.GetDiskThrottlingKey, err) return false } return cache != nil } -func (d *Driver) checkDiskExists(ctx context.Context, diskURI string) (*compute.Disk, error) { +func (d *Driver) isCheckDiskLunThrottled() bool { + cache, err := d.throttlingCache.Get(consts.CheckDiskLunThrottlingKey, azcache.CacheReadTypeDefault) + if err != nil { + klog.Warningf("throttlingCache(%s) return with error: %s", consts.CheckDiskLunThrottlingKey, err) + return false + } + return cache != nil +} + +func (d *Driver) checkDiskExists(ctx context.Context, diskURI string) (*armcompute.Disk, error) { diskName, err := azureutils.GetDiskName(diskURI) if err != nil { return nil, err @@ -343,17 +398,15 @@ func (d *Driver) checkDiskExists(ctx context.Context, diskURI string) (*compute. return nil, nil } subsID := azureutils.GetSubscriptionIDFromURI(diskURI) - disk, rerr := d.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) - if rerr != nil { - if rerr.IsThrottled() || strings.Contains(rerr.RawError.Error(), consts.RateLimited) { - klog.Warningf("checkDiskExists(%s) is throttled with error: %v", diskURI, rerr.Error()) - d.getDiskThrottlingCache.Set(consts.ThrottlingKey, "") - return nil, nil - } - return nil, rerr.Error() + diskClient, err := d.diskController.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return nil, err } - - return &disk, nil + disk, err := diskClient.Get(ctx, resourceGroup, diskName) + if err != nil { + return nil, err + } + return disk, nil } func (d *Driver) checkDiskCapacity(ctx context.Context, subsID, resourceGroup, diskName string, requestGiB int) (bool, error) { @@ -361,18 +414,16 @@ func (d *Driver) checkDiskCapacity(ctx context.Context, subsID, resourceGroup, d klog.Warningf("skip checkDiskCapacity(%s, %s) since it's still in throttling", resourceGroup, diskName) return true, nil } - - disk, rerr := d.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) + diskClient, err := d.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return false, err + } + disk, err := diskClient.Get(ctx, resourceGroup, diskName) // Because we can not judge the reason of the error. Maybe the disk does not exist. // So here we do not handle the error. 
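Both checkDiskExists and checkDiskCapacity now follow the same track-2 pattern: resolve a per-subscription disk client from the azclient factory, then call Get, which returns an *armcompute.Disk and a plain error rather than the old *retry.Error. A self-contained sketch of that pattern, with an illustrative helper name and simplified error handling:

    package example

    import (
        "context"
        "fmt"

        "sigs.k8s.io/cloud-provider-azure/pkg/azclient"
    )

    // diskSizeGiB is an illustrative helper: it resolves the disk client for the
    // given subscription and reads the provisioned size from the returned disk.
    func diskSizeGiB(ctx context.Context, factory azclient.ClientFactory, subsID, resourceGroup, diskName string) (int32, error) {
        diskClient, err := factory.GetDiskClientForSub(subsID)
        if err != nil {
            return 0, err
        }
        disk, err := diskClient.Get(ctx, resourceGroup, diskName)
        if err != nil {
            return 0, err
        }
        if disk == nil || disk.Properties == nil || disk.Properties.DiskSizeGB == nil {
            return 0, fmt.Errorf("disk %s has no size information", diskName)
        }
        return *disk.Properties.DiskSizeGB, nil
    }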
- if rerr == nil { - if !reflect.DeepEqual(disk, compute.Disk{}) && disk.DiskSizeGB != nil && int(*disk.DiskSizeGB) != requestGiB { - return false, status.Errorf(codes.AlreadyExists, "the request volume already exists, but its capacity(%v) is different from (%v)", *disk.DiskProperties.DiskSizeGB, requestGiB) - } - } else { - if rerr.IsThrottled() || strings.Contains(rerr.RawError.Error(), consts.RateLimited) { - klog.Warningf("checkDiskCapacity(%s, %s) is throttled with error: %v", resourceGroup, diskName, rerr.Error()) - d.getDiskThrottlingCache.Set(consts.ThrottlingKey, "") + if err == nil { + if !reflect.DeepEqual(disk, armcompute.Disk{}) && disk.Properties.DiskSizeGB != nil && int(*disk.Properties.DiskSizeGB) != requestGiB { + return false, status.Errorf(codes.AlreadyExists, "the request volume already exists, but its capacity(%v) is different from (%v)", *disk.Properties.DiskSizeGB, requestGiB) } } return true, nil @@ -451,29 +502,35 @@ func (d *DriverCore) getHostUtil() hostUtil { return d.hostUtil } -// getSnapshotCopyCompletionPercent returns the completion percent of copy snapshot -func (d *DriverCore) getSnapshotCopyCompletionPercent(ctx context.Context, subsID, resourceGroup, copySnapshotName string) (float64, error) { - copySnapshot, rerr := d.cloud.SnapshotsClient.Get(ctx, subsID, resourceGroup, copySnapshotName) - if rerr != nil { - return 0.0, rerr.Error() +// getSnapshotCompletionPercent returns the completion percent of snapshot +func (d *DriverCore) getSnapshotCompletionPercent(ctx context.Context, subsID, resourceGroup, snapshotName string) (float32, error) { + snapshotClient, err := d.clientFactory.GetSnapshotClientForSub(subsID) + if err != nil { + return 0.0, err + } + copySnapshot, err := snapshotClient.Get(ctx, resourceGroup, snapshotName) + if err != nil { + return 0.0, err } - if copySnapshot.SnapshotProperties == nil || copySnapshot.SnapshotProperties.CompletionPercent == nil { - return 0.0, fmt.Errorf("copy snapshot(%s) under rg(%s) has no SnapshotProperties or CompletionPercent is nil", copySnapshotName, resourceGroup) + if copySnapshot.Properties == nil || copySnapshot.Properties.CompletionPercent == nil { + // If CompletionPercent is nil, it means the snapshot is complete + klog.V(2).Infof("snapshot(%s) under rg(%s) has no SnapshotProperties or CompletionPercent is nil", snapshotName, resourceGroup) + return 100.0, nil } - return *copySnapshot.SnapshotProperties.CompletionPercent, nil + return *copySnapshot.Properties.CompletionPercent, nil } -// waitForSnapshotCopy wait for copy incremental snapshot to a new region until completionPercent is 100.0 -func (d *DriverCore) waitForSnapshotCopy(ctx context.Context, subsID, resourceGroup, copySnapshotName string, intervel, timeout time.Duration) error { - completionPercent, err := d.getSnapshotCopyCompletionPercent(ctx, subsID, resourceGroup, copySnapshotName) +// waitForSnapshotReady wait for completionPercent of snapshot is 100.0 +func (d *DriverCore) waitForSnapshotReady(ctx context.Context, subsID, resourceGroup, snapshotName string, intervel, timeout time.Duration) error { + completionPercent, err := d.getSnapshotCompletionPercent(ctx, subsID, resourceGroup, snapshotName) if err != nil { return err } - if completionPercent >= float64(100.0) { - klog.V(2).Infof("copy snapshot(%s) under rg(%s) complete", copySnapshotName, resourceGroup) + if completionPercent >= float32(100.0) { + klog.V(2).Infof("snapshot(%s) under rg(%s) complete", snapshotName, resourceGroup) return nil } @@ -482,20 +539,74 @@ func (d 
*DriverCore) waitForSnapshotCopy(ctx context.Context, subsID, resourceGr for { select { case <-timeTick: - completionPercent, err = d.getSnapshotCopyCompletionPercent(ctx, subsID, resourceGroup, copySnapshotName) + completionPercent, err = d.getSnapshotCompletionPercent(ctx, subsID, resourceGroup, snapshotName) if err != nil { return err } - if completionPercent >= float64(100.0) { - klog.V(2).Infof("copy snapshot(%s) under rg(%s) complete", copySnapshotName, resourceGroup) + if completionPercent >= float32(100.0) { + klog.V(2).Infof("snapshot(%s) under rg(%s) complete", snapshotName, resourceGroup) return nil } - klog.V(2).Infof("copy snapshot(%s) under rg(%s) completionPercent: %f", copySnapshotName, resourceGroup, completionPercent) + klog.V(2).Infof("snapshot(%s) under rg(%s) completionPercent: %f", snapshotName, resourceGroup, completionPercent) case <-timeAfter: - return fmt.Errorf("timeout waiting for copy snapshot(%s) under rg(%s)", copySnapshotName, resourceGroup) + return fmt.Errorf("timeout waiting for snapshot(%s) under rg(%s)", snapshotName, resourceGroup) + } + } +} + +// getUsedLunsFromVolumeAttachments returns a list of used luns from VolumeAttachments +func (d *DriverCore) getUsedLunsFromVolumeAttachments(ctx context.Context, nodeName string) ([]int, error) { + kubeClient := d.cloud.KubeClient + if kubeClient == nil || kubeClient.StorageV1() == nil || kubeClient.StorageV1().VolumeAttachments() == nil { + return nil, fmt.Errorf("kubeClient or kubeClient.StorageV1() or kubeClient.StorageV1().VolumeAttachments() is nil") + } + + volumeAttachments, err := kubeClient.StorageV1().VolumeAttachments().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + usedLuns := make([]int, 0) + if volumeAttachments == nil { + klog.V(2).Infof("volumeAttachments is nil") + return usedLuns, nil + } + for _, va := range volumeAttachments.Items { + klog.V(6).Infof("attacher: %s, nodeName: %s, Status: %v, PV: %s, attachmentMetadata: %v", va.Spec.Attacher, va.Spec.NodeName, + va.Status.Attached, pointer.StringDeref(va.Spec.Source.PersistentVolumeName, ""), va.Status.AttachmentMetadata) + if va.Spec.Attacher == consts.DefaultDriverName && strings.EqualFold(va.Spec.NodeName, nodeName) && va.Status.Attached { + if k, ok := va.Status.AttachmentMetadata[consts.LUN]; ok { + lun, err := strconv.Atoi(k) + if err != nil { + klog.Warningf("VolumeAttachment(%s) lun(%s) is not a valid integer", va.Name, k) + continue + } + usedLuns = append(usedLuns, lun) + } + } + } + return usedLuns, nil +} + +// getUsedLunsFromNode returns a list of sorted used luns from Node +func (d *DriverCore) getUsedLunsFromNode(nodeName types.NodeName) ([]int, error) { + disks, _, err := d.diskController.GetNodeDataDisks(nodeName, azcache.CacheReadTypeDefault) + if err != nil { + klog.Errorf("error of getting data disks for node %s: %v", nodeName, err) + return nil, err + } + + usedLuns := make([]int, 0) + // get all disks attached to the node + for _, disk := range disks { + if disk.Lun == nil { + klog.Warningf("disk(%s) lun is nil", *disk.Name) + continue } + usedLuns = append(usedLuns, int(*disk.Lun)) } + return usedLuns, nil } // getNodeInfoFromLabels get zone, instanceType from node labels diff --git a/pkg/azuredisk/azuredisk_option.go b/pkg/azuredisk/azuredisk_option.go new file mode 100644 index 000000000..37384c7c5 --- /dev/null +++ b/pkg/azuredisk/azuredisk_option.go @@ -0,0 +1,111 @@ +/* +Copyright 2023 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azuredisk + +import ( + "flag" + + consts "sigs.k8s.io/azuredisk-csi-driver/pkg/azureconstants" +) + +// DriverOptions defines driver parameters specified in driver deployment +type DriverOptions struct { + // Common options + NodeID string + DriverName string + VolumeAttachLimit int64 + ReservedDataDiskSlotNum int64 + EnablePerfOptimization bool + CloudConfigSecretName string + CloudConfigSecretNamespace string + CustomUserAgent string + UserAgentSuffix string + UseCSIProxyGAInterface bool + EnableOtelTracing bool + + //only used in v1 + EnableDiskOnlineResize bool + AllowEmptyCloudConfig bool + EnableListVolumes bool + EnableListSnapshots bool + SupportZone bool + GetNodeInfoFromLabels bool + EnableDiskCapacityCheck bool + DisableUpdateCache bool + EnableTrafficManager bool + TrafficManagerPort int64 + AttachDetachInitialDelayInMs int64 + VMSSCacheTTLInSeconds int64 + VolStatsCacheExpireInMinutes int64 + VMType string + EnableWindowsHostProcess bool + GetNodeIDFromIMDS bool + WaitForSnapshotReady bool + CheckDiskLUNCollision bool + ForceDetachBackoff bool + Kubeconfig string + Endpoint string + DisableAVSetNodes bool + + // [Edgeless] Constellation options + KMSAddr string +} + +func (o *DriverOptions) AddFlags() *flag.FlagSet { + if o == nil { + return nil + } + fs := flag.NewFlagSet("", flag.ExitOnError) + fs.StringVar(&o.NodeID, "nodeid", "", "node id") + fs.StringVar(&o.DriverName, "drivername", consts.DefaultDriverName, "name of the driver") + fs.Int64Var(&o.VolumeAttachLimit, "volume-attach-limit", -1, "maximum number of attachable volumes per node") + fs.Int64Var(&o.ReservedDataDiskSlotNum, "reserved-data-disk-slot-num", 0, "reserved data disk slot number per node") + fs.BoolVar(&o.EnablePerfOptimization, "enable-perf-optimization", false, "boolean flag to enable disk perf optimization") + fs.StringVar(&o.CloudConfigSecretName, "cloud-config-secret-name", "azure-cloud-provider", "cloud config secret name") + fs.StringVar(&o.CloudConfigSecretNamespace, "cloud-config-secret-namespace", "kube-system", "cloud config secret namespace") + fs.StringVar(&o.CustomUserAgent, "custom-user-agent", "", "custom userAgent") + fs.StringVar(&o.UserAgentSuffix, "user-agent-suffix", "", "userAgent suffix") + fs.BoolVar(&o.UseCSIProxyGAInterface, "use-csiproxy-ga-interface", true, "boolean flag to enable csi-proxy GA interface on Windows") + fs.BoolVar(&o.EnableOtelTracing, "enable-otel-tracing", false, "If set, enable opentelemetry tracing for the driver. The tracing is disabled by default. 
Configure the exporter endpoint with OTEL_EXPORTER_OTLP_ENDPOINT and other env variables, see https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/#general-sdk-configuration.") + //only used in v1 + fs.BoolVar(&o.EnableDiskOnlineResize, "enable-disk-online-resize", true, "boolean flag to enable disk online resize") + fs.BoolVar(&o.AllowEmptyCloudConfig, "allow-empty-cloud-config", true, "Whether to allow running driver without cloud config") + fs.BoolVar(&o.EnableListVolumes, "enable-list-volumes", false, "boolean flag to enable ListVolumes on controller") + fs.BoolVar(&o.EnableListSnapshots, "enable-list-snapshots", false, "boolean flag to enable ListSnapshots on controller") + fs.BoolVar(&o.SupportZone, "support-zone", true, "boolean flag to get zone info in NodeGetInfo") + fs.BoolVar(&o.GetNodeInfoFromLabels, "get-node-info-from-labels", false, "boolean flag to get zone info from node labels in NodeGetInfo") + fs.BoolVar(&o.EnableDiskCapacityCheck, "enable-disk-capacity-check", false, "boolean flag to enable volume capacity check in CreateVolume") + fs.BoolVar(&o.DisableUpdateCache, "disable-update-cache", false, "boolean flag to disable update cache during disk attach/detach") + fs.BoolVar(&o.EnableTrafficManager, "enable-traffic-manager", false, "boolean flag to enable traffic manager") + fs.Int64Var(&o.TrafficManagerPort, "traffic-manager-port", 7788, "default traffic manager port") + fs.Int64Var(&o.AttachDetachInitialDelayInMs, "attach-detach-initial-delay-ms", 1000, "initial delay in milliseconds for batch disk attach/detach") + fs.Int64Var(&o.VMSSCacheTTLInSeconds, "vmss-cache-ttl-seconds", -1, "vmss cache TTL in seconds (600 by default)") + fs.Int64Var(&o.VolStatsCacheExpireInMinutes, "vol-stats-cache-expire-in-minutes", 10, "The cache expire time in minutes for volume stats cache") + fs.StringVar(&o.VMType, "vm-type", "", "type of agent node. available values: vmss, standard") + fs.BoolVar(&o.EnableWindowsHostProcess, "enable-windows-host-process", false, "enable windows host process") + fs.BoolVar(&o.GetNodeIDFromIMDS, "get-nodeid-from-imds", false, "boolean flag to get NodeID from IMDS") + fs.BoolVar(&o.WaitForSnapshotReady, "wait-for-snapshot-ready", true, "boolean flag to wait for snapshot ready when creating snapshot in same region") + fs.BoolVar(&o.CheckDiskLUNCollision, "check-disk-lun-collision", true, "boolean flag to check disk lun collision before attaching disk") + fs.BoolVar(&o.ForceDetachBackoff, "force-detach-backoff", true, "boolean flag to force detach in disk detach backoff") + fs.StringVar(&o.Kubeconfig, "kubeconfig", "", "Absolute path to the kubeconfig file. Required only when running out of cluster.") + fs.BoolVar(&o.DisableAVSetNodes, "disable-avset-nodes", false, "disable DisableAvailabilitySetNodes in cloud config for controller") + fs.StringVar(&o.Endpoint, "endpoint", "unix://tmp/csi.sock", "CSI endpoint") + + fs.StringVar(&o.KMSAddr, "kms-addr", "kms-service.kube-system:9000", "Address of Constellation's KMS. Used to request keys (default: kms-service.kube-system:9000)") + + return fs +} diff --git a/pkg/azuredisk/azuredisk_option_test.go b/pkg/azuredisk/azuredisk_option_test.go new file mode 100644 index 000000000..2320ef4da --- /dev/null +++ b/pkg/azuredisk/azuredisk_option_test.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azuredisk + +import ( + "flag" + "reflect" + "testing" +) + +func TestDriverOptions_AddFlags(t *testing.T) { + o := &DriverOptions{} + typeInfo := reflect.TypeOf(*o) + + got := o.AddFlags() + count := 0 + got.VisitAll(func(f *flag.Flag) { + count++ + }) + if count != typeInfo.NumField() { + t.Errorf("DriverOptions.AddFlags() = %v, want %v", count, typeInfo.NumField()) + } +} diff --git a/pkg/azuredisk/azuredisk_test.go b/pkg/azuredisk/azuredisk_test.go index 5d60ab8de..f0d044a9a 100644 --- a/pkg/azuredisk/azuredisk_test.go +++ b/pkg/azuredisk/azuredisk_test.go @@ -25,18 +25,21 @@ import ( "testing" "time" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - "github.com/Azure/go-autorest/autorest/date" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" + "golang.org/x/sync/errgroup" "google.golang.org/grpc/status" + "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" "k8s.io/utils/pointer" consts "sigs.k8s.io/azuredisk-csi-driver/pkg/azureconstants" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/mock_diskclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/mock_snapshotclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient" azure "sigs.k8s.io/cloud-provider-azure/pkg/provider" - "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) func TestNewDriverV1(t *testing.T) { @@ -45,21 +48,27 @@ func TestNewDriverV1(t *testing.T) { DriverName: consts.DefaultDriverName, VolumeAttachLimit: 16, EnablePerfOptimization: false, + Kubeconfig: "", + AllowEmptyCloudConfig: true, }) assert.NotNil(t, d) } func TestCheckDiskCapacity(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) size := int32(10) diskName := "unit-test" resourceGroup := "unit-test" - disk := compute.Disk{ - DiskProperties: &compute.DiskProperties{ + disk := &armcompute.Disk{ + Properties: &armcompute.DiskProperties{ DiskSizeGB: &size, }, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub("").Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() flag, err := d.checkDiskCapacity(context.TODO(), "", resourceGroup, diskName, 10) assert.Equal(t, flag, true) assert.Nil(t, err) @@ -100,15 +109,33 @@ func TestRun(t *testing.T) { t.Setenv(consts.DefaultAzureCredentialFileEnv, fakeCredFile) - d, _ := NewFakeDriver(t) - d.Run("tcp://127.0.0.1:0", "", true, true) + cntl := 
gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) + ctx, cancelFn := context.WithCancel(context.Background()) + var routines errgroup.Group + routines.Go(func() error { return d.Run(ctx) }) + time.Sleep(time.Millisecond * 500) + cancelFn() + time.Sleep(time.Millisecond * 500) + err := routines.Wait() + assert.Nil(t, err) }, }, { name: "Successful run without cloud config", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) - d.Run("tcp://127.0.0.1:0", "", true, true) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) + ctx, cancelFn := context.WithCancel(context.Background()) + var routines errgroup.Group + routines.Go(func() error { return d.Run(ctx) }) + time.Sleep(time.Millisecond * 500) + cancelFn() + time.Sleep(time.Millisecond * 500) + err := routines.Wait() + assert.Nil(t, err) }, }, { @@ -126,10 +153,19 @@ func TestRun(t *testing.T) { t.Setenv(consts.DefaultAzureCredentialFileEnv, fakeCredFile) - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) d.setCloud(&azure.Cloud{}) d.setNodeID("") - d.Run("tcp://127.0.0.1:0", "", true, true) + ctx, cancelFn := context.WithCancel(context.Background()) + var routines errgroup.Group + routines.Go(func() error { return d.Run(ctx) }) + time.Sleep(time.Millisecond * 500) + cancelFn() + time.Sleep(time.Millisecond * 500) + err := routines.Wait() + assert.Nil(t, err) }, }, { @@ -155,8 +191,59 @@ func TestRun(t *testing.T) { EnablePerfOptimization: true, VMSSCacheTTLInSeconds: 10, VMType: "vmss", + Endpoint: "tcp://127.0.0.1:0", }) - d.Run("tcp://127.0.0.1:0", "", true, true) + ctx, cancelFn := context.WithCancel(context.Background()) + var routines errgroup.Group + routines.Go(func() error { return d.Run(ctx) }) + time.Sleep(time.Millisecond * 500) + cancelFn() + time.Sleep(time.Millisecond * 500) + err := routines.Wait() + assert.Nil(t, err) + }, + }, + { + name: "Successful run with federated workload identity azure client", + testFunc: func(t *testing.T) { + if err := os.WriteFile(fakeCredFile, []byte(fakeCredContent), 0666); err != nil { + t.Error(err) + } + + defer func() { + if err := os.Remove(fakeCredFile); err != nil { + t.Error(err) + } + }() + + t.Setenv(consts.DefaultAzureCredentialFileEnv, fakeCredFile) + t.Setenv("AZURE_TENANT_ID", "1234") + t.Setenv("AZURE_CLIENT_ID", "123456") + t.Setenv("AZURE_FEDERATED_TOKEN_FILE", "fake-token-file") + + d := newDriverV1(&DriverOptions{ + NodeID: "", + DriverName: consts.DefaultDriverName, + EnableListVolumes: true, + EnableListSnapshots: true, + EnablePerfOptimization: true, + VMSSCacheTTLInSeconds: 10, + VMType: "vmss", + Endpoint: "tcp://127.0.0.1:0", + }) + + ctx, cancel := context.WithCancel(context.Background()) + ch := make(chan error) + go func() { + err := d.Run(ctx) + ch <- err + }() + cancel() + assert.Nil(t, <-ch) + assert.Equal(t, d.cloud.UseFederatedWorkloadIdentityExtension, true) + assert.Equal(t, d.cloud.AADFederatedTokenFile, "fake-token-file") + assert.Equal(t, d.cloud.AADClientID, "123456") + assert.Equal(t, d.cloud.TenantID, "1234") }, }, } @@ -167,7 +254,9 @@ func TestRun(t *testing.T) { } func TestDriver_checkDiskExists(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) _, err := d.checkDiskExists(context.TODO(), "testurl/subscriptions/12/providers/Microsoft.Compute/disks/name") assert.NotEqual(t, err, nil) } @@ -259,7 +348,7 @@ func TestGetDefaultDiskMBPSReadWrite(t 
*testing.T) { } } -func TestWaitForSnapshotCopy(t *testing.T) { +func TestWaitForSnapshot(t *testing.T) { testCases := []struct { name string testFunc func(t *testing.T) @@ -267,39 +356,42 @@ func TestWaitForSnapshotCopy(t *testing.T) { { name: "snapshotID not valid", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) subID := "subs" resourceGroup := "rg" intervel := 1 * time.Millisecond timeout := 10 * time.Millisecond snapshotID := "test" - snapshot := compute.Snapshot{ - SnapshotProperties: &compute.SnapshotProperties{}, - ID: &snapshotID} + snapshot := &armcompute.Snapshot{ + Properties: &armcompute.SnapshotProperties{}, + ID: &snapshotID} ctrl := gomock.NewController(t) defer ctrl.Finish() - mockSnapshotClient := mocksnapshotclient.NewMockInterface(ctrl) - d.getCloud().SnapshotsClient = mockSnapshotClient - rerr := &retry.Error{ - RawError: fmt.Errorf("invalid snapshotID"), - } - mockSnapshotClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(snapshot, rerr).AnyTimes() - err := d.waitForSnapshotCopy(context.Background(), subID, resourceGroup, snapshotID, intervel, timeout) + mockSnapshotClient := mock_snapshotclient.NewMockInterface(ctrl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetSnapshotClientForSub(subID).Return(mockSnapshotClient, nil).AnyTimes() + + mockSnapshotClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(snapshot, fmt.Errorf("invalid snapshotID")).AnyTimes() + err := d.waitForSnapshotReady(context.Background(), subID, resourceGroup, snapshotID, intervel, timeout) wantErr := true subErrMsg := "invalid snapshotID" if (err != nil) != wantErr { - t.Errorf("waitForSnapshotCopy() error = %v, wantErr %v", err, wantErr) + t.Errorf("waitForSnapshotReady() error = %v, wantErr %v", err, wantErr) } if err != nil && !strings.Contains(err.Error(), subErrMsg) { - t.Errorf("waitForSnapshotCopy() error = %v, wantErr %v", err, subErrMsg) + t.Errorf("waitForSnapshotReady() error = %v, wantErr %v", err, subErrMsg) } }, }, + //nolint:dupl { name: "timeout for waiting snapshot copy cross region", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) subID := "subs" resourceGroup := "rg" intervel := 1 * time.Millisecond @@ -309,37 +401,41 @@ func TestWaitForSnapshotCopy(t *testing.T) { snapshotID := "test" location := "loc" provisioningState := "succeeded" - snapshot := compute.Snapshot{ - SnapshotProperties: &compute.SnapshotProperties{ - TimeCreated: &date.Time{}, + snapshot := &armcompute.Snapshot{ + Properties: &armcompute.SnapshotProperties{ + TimeCreated: &time.Time{}, ProvisioningState: &provisioningState, DiskSizeGB: &DiskSize, - CreationData: &compute.CreationData{SourceResourceID: &volumeID}, - CompletionPercent: pointer.Float64(0.0), + CreationData: &armcompute.CreationData{SourceResourceID: &volumeID}, + CompletionPercent: pointer.Float32(0.0), }, Location: &location, ID: &snapshotID} ctrl := gomock.NewController(t) defer ctrl.Finish() - mockSnapshotClient := mocksnapshotclient.NewMockInterface(ctrl) - d.getCloud().SnapshotsClient = mockSnapshotClient - mockSnapshotClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(snapshot, nil).AnyTimes() - err := d.waitForSnapshotCopy(context.Background(), subID, resourceGroup, snapshotID, intervel, timeout) + mockSnapshotClient := 
mock_snapshotclient.NewMockInterface(ctrl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetSnapshotClientForSub(subID).Return(mockSnapshotClient, nil).AnyTimes() + + mockSnapshotClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(snapshot, nil).AnyTimes() + err := d.waitForSnapshotReady(context.Background(), subID, resourceGroup, snapshotID, intervel, timeout) wantErr := true subErrMsg := "timeout" if (err != nil) != wantErr { - t.Errorf("waitForSnapshotCopy() error = %v, wantErr %v", err, wantErr) + t.Errorf("waitForSnapshotReady() error = %v, wantErr %v", err, wantErr) } if err != nil && !strings.Contains(err.Error(), subErrMsg) { - t.Errorf("waitForSnapshotCopy() error = %v, wantErr %v", err, subErrMsg) + t.Errorf("waitForSnapshotReady() error = %v, wantErr %v", err, subErrMsg) } }, }, + //nolint:dupl { name: "succeed for waiting snapshot copy cross region", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) subID := "subs" resourceGroup := "rg" intervel := 1 * time.Millisecond @@ -349,30 +445,30 @@ func TestWaitForSnapshotCopy(t *testing.T) { snapshotID := "test" location := "loc" provisioningState := "succeeded" - snapshot := compute.Snapshot{ - SnapshotProperties: &compute.SnapshotProperties{ - TimeCreated: &date.Time{}, + snapshot := &armcompute.Snapshot{ + Properties: &armcompute.SnapshotProperties{ + TimeCreated: &time.Time{}, ProvisioningState: &provisioningState, DiskSizeGB: &DiskSize, - CreationData: &compute.CreationData{SourceResourceID: &volumeID}, - CompletionPercent: pointer.Float64(100.0), + CreationData: &armcompute.CreationData{SourceResourceID: &volumeID}, + CompletionPercent: pointer.Float32(100.0), }, Location: &location, ID: &snapshotID} ctrl := gomock.NewController(t) defer ctrl.Finish() - mockSnapshotClient := mocksnapshotclient.NewMockInterface(ctrl) - d.getCloud().SnapshotsClient = mockSnapshotClient - mockSnapshotClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(snapshot, nil).AnyTimes() - err := d.waitForSnapshotCopy(context.Background(), subID, resourceGroup, snapshotID, intervel, timeout) + mockSnapshotClient := mock_snapshotclient.NewMockInterface(ctrl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetSnapshotClientForSub(subID).Return(mockSnapshotClient, nil).AnyTimes() + mockSnapshotClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(snapshot, nil).AnyTimes() + err := d.waitForSnapshotReady(context.Background(), subID, resourceGroup, snapshotID, intervel, timeout) wantErr := false subErrMsg := "" if (err != nil) != wantErr { - t.Errorf("waitForSnapshotCopy() error = %v, wantErr %v", err, wantErr) + t.Errorf("waitForSnapshotReady() error = %v, wantErr %v", err, wantErr) } if err != nil && !strings.Contains(err.Error(), subErrMsg) { - t.Errorf("waitForSnapshotCopy() error = %v, wantErr %v", err, subErrMsg) + t.Errorf("waitForSnapshotReady() error = %v, wantErr %v", err, subErrMsg) } }, }, @@ -425,3 +521,71 @@ func TestGetVMSSInstanceName(t *testing.T) { } } } + +func TestGetUsedLunsFromVolumeAttachments(t *testing.T) { + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) + tests := []struct { + name string + nodeName string + expectedUsedLunList []int + expectedErr error + }{ + { + name: "nil kubeClient", + nodeName: "test-node", + expectedUsedLunList: nil, + expectedErr: fmt.Errorf("kubeClient or kubeClient.StorageV1() or 
kubeClient.StorageV1().VolumeAttachments() is nil"), + }, + } + for _, test := range tests { + result, err := d.getUsedLunsFromVolumeAttachments(context.Background(), test.nodeName) + if !reflect.DeepEqual(err, test.expectedErr) { + t.Errorf("test(%s): err(%v) != expected err(%v)", test.name, err, test.expectedErr) + } + if !reflect.DeepEqual(result, test.expectedUsedLunList) { + t.Errorf("test(%s): result(%v) != expected result(%v)", test.name, result, test.expectedUsedLunList) + } + } +} + +func TestGetUsedLunsFromNode(t *testing.T) { + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) + vm := compute.VirtualMachine{} + dataDisks := make([]compute.DataDisk, 2) + dataDisks[0] = compute.DataDisk{Lun: pointer.Int32(int32(0)), Name: &testVolumeName} + dataDisks[1] = compute.DataDisk{Lun: pointer.Int32(int32(2)), Name: &testVolumeName} + vm.VirtualMachineProperties = &compute.VirtualMachineProperties{ + StorageProfile: &compute.StorageProfile{ + DataDisks: &dataDisks, + }, + } + mockVMsClient := d.getCloud().VirtualMachinesClient.(*mockvmclient.MockInterface) + mockVMsClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(vm, nil).AnyTimes() + + tests := []struct { + name string + nodeName string + expectedUsedLunList []int + expectedErr error + }{ + { + name: "lun 0 and 2 are used", + nodeName: "test-node", + expectedUsedLunList: []int{0, 2}, + expectedErr: nil, + }, + } + for _, test := range tests { + result, err := d.getUsedLunsFromNode(types.NodeName(test.nodeName)) + if !reflect.DeepEqual(err, test.expectedErr) { + t.Errorf("test(%s): err(%v) != expected err(%v)", test.name, err, test.expectedErr) + } + if !reflect.DeepEqual(result, test.expectedUsedLunList) { + t.Errorf("test(%s): result(%v) != expected result(%v)", test.name, result, test.expectedUsedLunList) + } + } +} diff --git a/pkg/azuredisk/azuredisk_v1_test.go b/pkg/azuredisk/azuredisk_v1_test.go index 093e840bc..c0f1c5365 100644 --- a/pkg/azuredisk/azuredisk_v1_test.go +++ b/pkg/azuredisk/azuredisk_v1_test.go @@ -23,33 +23,40 @@ import ( "context" "testing" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - "github.com/golang/mock/gomock" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" consts "sigs.k8s.io/azuredisk-csi-driver/pkg/azureconstants" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/mock_diskclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient" ) func TestCheckDiskCapacity_V1(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) size := int32(10) diskName := "unit-test" resourceGroup := "unit-test" - disk := compute.Disk{ - DiskProperties: &compute.DiskProperties{ + disk := &armcompute.Disk{ + Properties: &armcompute.DiskProperties{ DiskSizeGB: &size, }, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub("").Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() - d.setDiskThrottlingCache(consts.ThrottlingKey, "") + 
d.setThrottlingCache(consts.GetDiskThrottlingKey, "") flag, _ := d.checkDiskCapacity(context.TODO(), "", resourceGroup, diskName, 11) assert.Equal(t, flag, true) } func TestDriver_checkDiskExists_V1(t *testing.T) { - d, _ := NewFakeDriver(t) - d.setDiskThrottlingCache(consts.ThrottlingKey, "") + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) + d.setThrottlingCache(consts.GetDiskThrottlingKey, "") _, err := d.checkDiskExists(context.TODO(), "testurl/subscriptions/12/resourceGroups/23/providers/Microsoft.Compute/disks/name") assert.Equal(t, err, nil) } diff --git a/pkg/azuredisk/azuredisk_v2.go b/pkg/azuredisk/azuredisk_v2.go index 391c44256..fa951ab4a 100644 --- a/pkg/azuredisk/azuredisk_v2.go +++ b/pkg/azuredisk/azuredisk_v2.go @@ -21,16 +21,19 @@ package azuredisk import ( "context" + "errors" "flag" "fmt" + "os" "reflect" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/container-storage-interface/spec/lib/go/csi" - + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - + "k8s.io/client-go/rest" "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/volume/util/hostutil" @@ -78,69 +81,73 @@ func newDriverV2(options *DriverOptions) *DriverV2 { driver.enableOtelTracing = options.EnableOtelTracing driver.ioHandler = azureutils.NewOSIOHandler() driver.hostUtil = hostutil.NewHostUtil() + driver.disableAVSetNodes = options.DisableAVSetNodes + driver.endpoint = options.Endpoint topologyKey = fmt.Sprintf("topology.%s/zone", driver.Name) - return &driver -} + userAgent := GetUserAgent(driver.Name, driver.customUserAgent, driver.userAgentSuffix) + klog.V(2).Infof("driver userAgent: %s", userAgent) -// Run driver initialization -func (d *DriverV2) Run(endpoint, kubeconfig string, disableAVSetNodes, testingMock bool) { - versionMeta, err := GetVersionYAML(d.Name) + kubeClient, err := azureutils.GetKubeClient(options.Kubeconfig) if err != nil { - klog.Fatalf("%v", err) + klog.Warningf("get kubeconfig(%s) failed with error: %v", options.Kubeconfig, err) + if !os.IsNotExist(err) && !errors.Is(err, rest.ErrNotInCluster) { + klog.Fatalf("failed to get KubeClient: %v", err) + } } - klog.Infof("\nDRIVER INFORMATION:\n-------------------\n%s\n\nStreaming logs below:", versionMeta) - - userAgent := GetUserAgent(d.Name, d.customUserAgent, d.userAgentSuffix) - klog.V(2).Infof("driver userAgent: %s", userAgent) + driver.kubeClient = kubeClient - cloud, err := azureutils.GetCloudProvider(context.Background(), kubeconfig, d.cloudConfigSecretName, d.cloudConfigSecretNamespace, - userAgent, d.allowEmptyCloudConfig, d.enableTrafficManager, d.trafficManagerPort) + cloud, err := azureutils.GetCloudProviderFromClient(context.Background(), kubeClient, driver.cloudConfigSecretName, driver.cloudConfigSecretNamespace, + userAgent, driver.allowEmptyCloudConfig, driver.enableTrafficManager, driver.trafficManagerPort) if err != nil { klog.Fatalf("failed to get Azure Cloud Provider, error: %v", err) } - d.cloud = cloud - - if d.cloud != nil { - if d.vmType != "" { - klog.V(2).Infof("override VMType(%s) in cloud config as %s", d.cloud.VMType, d.vmType) - d.cloud.VMType = d.vmType + driver.cloud = cloud + + if driver.cloud != nil { + driver.diskController = NewManagedDiskController(driver.cloud) + driver.diskController.DisableUpdateCache = driver.disableUpdateCache + 
driver.diskController.AttachDetachInitialDelayInMs = int(driver.attachDetachInitialDelayInMs) + driver.clientFactory = driver.cloud.ComputeClientFactory + if driver.vmType != "" { + klog.V(2).Infof("override VMType(%s) in cloud config as %s", driver.cloud.VMType, driver.vmType) + driver.cloud.VMType = driver.vmType } - if d.NodeID == "" { + if driver.NodeID == "" { // Disable UseInstanceMetadata for controller to mitigate a timeout issue using IMDS // https://github.com/kubernetes-sigs/azuredisk-csi-driver/issues/168 klog.V(2).Infof("disable UseInstanceMetadata for controller") - d.cloud.Config.UseInstanceMetadata = false + driver.cloud.Config.UseInstanceMetadata = false - if d.cloud.VMType == consts.VMTypeStandard && d.cloud.DisableAvailabilitySetNodes { - klog.V(2).Infof("set DisableAvailabilitySetNodes as false since VMType is %s", d.cloud.VMType) - d.cloud.DisableAvailabilitySetNodes = false + if driver.cloud.VMType == consts.VMTypeStandard && driver.cloud.DisableAvailabilitySetNodes { + klog.V(2).Infof("set DisableAvailabilitySetNodes as false since VMType is %s", driver.cloud.VMType) + driver.cloud.DisableAvailabilitySetNodes = false } - if d.cloud.VMType == consts.VMTypeVMSS && !d.cloud.DisableAvailabilitySetNodes && disableAVSetNodes { + if driver.cloud.VMType == consts.VMTypeVMSS && !driver.cloud.DisableAvailabilitySetNodes && driver.disableAVSetNodes { klog.V(2).Infof("DisableAvailabilitySetNodes for controller since current VMType is vmss") - d.cloud.DisableAvailabilitySetNodes = true + driver.cloud.DisableAvailabilitySetNodes = true } - klog.V(2).Infof("cloud: %s, location: %s, rg: %s, VMType: %s, PrimaryScaleSetName: %s, PrimaryAvailabilitySetName: %s, DisableAvailabilitySetNodes: %v", d.cloud.Cloud, d.cloud.Location, d.cloud.ResourceGroup, d.cloud.VMType, d.cloud.PrimaryScaleSetName, d.cloud.PrimaryAvailabilitySetName, d.cloud.DisableAvailabilitySetNodes) + klog.V(2).Infof("cloud: %s, location: %s, rg: %s, VMType: %s, PrimaryScaleSetName: %s, PrimaryAvailabilitySetName: %s, DisableAvailabilitySetNodes: %v", driver.cloud.Cloud, driver.cloud.Location, driver.cloud.ResourceGroup, driver.cloud.VMType, driver.cloud.PrimaryScaleSetName, driver.cloud.PrimaryAvailabilitySetName, driver.cloud.DisableAvailabilitySetNodes) } } - d.deviceHelper = optimization.NewSafeDeviceHelper() + driver.deviceHelper = optimization.NewSafeDeviceHelper() - if d.getPerfOptimizationEnabled() { - d.nodeInfo, err = optimization.NewNodeInfo(context.TODO(), d.getCloud(), d.NodeID) + if driver.getPerfOptimizationEnabled() { + driver.nodeInfo, err = optimization.NewNodeInfo(context.TODO(), driver.getCloud(), driver.NodeID) if err != nil { klog.Errorf("Failed to get node info. Error: %v", err) } } - d.mounter, err = mounter.NewSafeMounter(d.enableWindowsHostProcess, d.useCSIProxyGAInterface) + driver.mounter, err = mounter.NewSafeMounter(driver.enableWindowsHostProcess, driver.useCSIProxyGAInterface) if err != nil { klog.Fatalf("Failed to get safe mounter. 
Error: %v", err) } - d.AddControllerServiceCapabilities( + driver.AddControllerServiceCapabilities( []csi.ControllerServiceCapability_RPC_Type{ csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, @@ -152,27 +159,61 @@ func (d *DriverV2) Run(endpoint, kubeconfig string, disableAVSetNodes, testingMo csi.ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES, csi.ControllerServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER, }) - d.AddVolumeCapabilityAccessModes( + driver.AddVolumeCapabilityAccessModes( []csi.VolumeCapability_AccessMode_Mode{ csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY, csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER, csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER, }) - d.AddNodeServiceCapabilities([]csi.NodeServiceCapability_RPC_Type{ + driver.AddNodeServiceCapabilities([]csi.NodeServiceCapability_RPC_Type{ csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME, csi.NodeServiceCapability_RPC_EXPAND_VOLUME, csi.NodeServiceCapability_RPC_GET_VOLUME_STATS, csi.NodeServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER, }) + return &driver +} - s := csicommon.NewNonBlockingGRPCServer() +// Run driver initialization +func (d *DriverV2) Run(ctx context.Context) error { + versionMeta, err := GetVersionYAML(d.Name) + if err != nil { + klog.Fatalf("%v", err) + } + klog.Infof("\nDRIVER INFORMATION:\n-------------------\n%s\n\nStreaming logs below:", versionMeta) + + grpcInterceptor := grpc.UnaryInterceptor(csicommon.LogGRPC) + if d.enableOtelTracing { + grpcInterceptor = grpc.ChainUnaryInterceptor(csicommon.LogGRPC, otelgrpc.UnaryServerInterceptor()) + } + opts := []grpc.ServerOption{ + grpcInterceptor, + } + s := grpc.NewServer(opts...) 
+ csi.RegisterIdentityServer(s, d) + csi.RegisterControllerServer(s, d) + csi.RegisterNodeServer(s, d) + + go func() { + //graceful shutdown + <-ctx.Done() + s.GracefulStop() + }() // Driver d act as IdentityServer, ControllerServer and NodeServer - s.Start(endpoint, d, d, d, testingMock, d.enableOtelTracing) - s.Wait() + listener, err := csicommon.Listen(ctx, d.endpoint) + if err != nil { + klog.Fatalf("failed to listen to endpoint, error: %v", err) + } + err = s.Serve(listener) + if errors.Is(err, grpc.ErrServerStopped) { + klog.Infof("gRPC server stopped serving") + return nil + } + return err } -func (d *DriverV2) checkDiskExists(ctx context.Context, diskURI string) (*compute.Disk, error) { +func (d *DriverV2) checkDiskExists(ctx context.Context, diskURI string) (*armcompute.Disk, error) { diskName, err := azureutils.GetDiskName(diskURI) if err != nil { return nil, err @@ -184,21 +225,30 @@ func (d *DriverV2) checkDiskExists(ctx context.Context, diskURI string) (*comput } subsID := azureutils.GetSubscriptionIDFromURI(diskURI) - disk, rerr := d.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) - if rerr != nil { - return nil, rerr.Error() + diskClient, err := d.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return nil, err + } + + disk, err := diskClient.Get(ctx, resourceGroup, diskName) + if err != nil { + return nil, err } - return &disk, nil + return disk, nil } func (d *DriverV2) checkDiskCapacity(ctx context.Context, subsID, resourceGroup, diskName string, requestGiB int) (bool, error) { - disk, err := d.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) + diskClient, err := d.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return false, err + } + disk, err := diskClient.Get(ctx, resourceGroup, diskName) // Because we can not judge the reason of the error. Maybe the disk does not exist. // So here we do not handle the error. 
if err == nil { - if !reflect.DeepEqual(disk, compute.Disk{}) && disk.DiskSizeGB != nil && int(*disk.DiskSizeGB) != requestGiB { - return false, status.Errorf(codes.AlreadyExists, "the request volume already exists, but its capacity(%v) is different from (%v)", *disk.DiskProperties.DiskSizeGB, requestGiB) + if !reflect.DeepEqual(disk, &armcompute.Disk{}) && disk.Properties != nil && disk.Properties.DiskSizeGB != nil && int(*disk.Properties.DiskSizeGB) != requestGiB { + return false, status.Errorf(codes.AlreadyExists, "the request volume already exists, but its capacity(%v) is different from (%v)", *disk.Properties.DiskSizeGB, requestGiB) } } return true, nil diff --git a/pkg/azuredisk/controllerserver.go b/pkg/azuredisk/controllerserver.go index 9994cae02..3abe22673 100644 --- a/pkg/azuredisk/controllerserver.go +++ b/pkg/azuredisk/controllerserver.go @@ -25,7 +25,8 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/container-storage-interface/spec/lib/go/csi" "google.golang.org/grpc/codes" @@ -42,13 +43,16 @@ import ( "sigs.k8s.io/azuredisk-csi-driver/pkg/azureutils" "sigs.k8s.io/azuredisk-csi-driver/pkg/optimization" volumehelper "sigs.k8s.io/azuredisk-csi-driver/pkg/util" + azureconsts "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/metrics" azure "sigs.k8s.io/cloud-provider-azure/pkg/provider" ) const ( - waitForSnapshotCopyInterval = 5 * time.Second - waitForSnapshotCopyTimeout = 10 * time.Minute + waitForSnapshotReadyInterval = 5 * time.Second + waitForSnapshotReadyTimeout = 10 * time.Minute + maxErrMsgLength = 990 + checkDiskLunThrottleLatency = 1 * time.Second ) // listVolumeStatus explains the return status of `listVolumesByResourceGroup` @@ -79,8 +83,8 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) return nil, status.Error(codes.InvalidArgument, "CreateVolume Volume capabilities must be provided") } - if !azureutils.IsValidVolumeCapabilities(volCaps, diskParams.MaxShares) { - return nil, status.Error(codes.InvalidArgument, "Volume capability not supported") + if err := azureutils.IsValidVolumeCapabilities(volCaps, diskParams.MaxShares); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) } isAdvancedPerfProfile := strings.EqualFold(diskParams.PerfProfile, consts.PerfProfileAdvanced) // If perfProfile is set to advanced and no/invalid device settings are provided, fail the request @@ -98,7 +102,13 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) capacityBytes := req.GetCapacityRange().GetRequiredBytes() volSizeBytes := int64(capacityBytes) requestGiB := int(volumehelper.RoundUpGiB(volSizeBytes)) + + if diskParams.PerformancePlus != nil && *diskParams.PerformancePlus && requestGiB < consts.PerformancePlusMinimumDiskSizeGiB { + klog.Warningf("using PerformancePlus, increasing requested disk size from %vGiB to %vGiB (minimal size for PerformancePlus feature)", requestGiB, consts.PerformancePlusMinimumDiskSizeGiB) + requestGiB = consts.PerformancePlusMinimumDiskSizeGiB + } if requestGiB < consts.MinimumDiskSizeGiB { + klog.Infof("increasing requested disk size from %vGiB to %vGiB (minimal disk size)", requestGiB, consts.MinimumDiskSizeGiB) requestGiB = consts.MinimumDiskSizeGiB } @@ -112,13 +122,26 @@ func (d *Driver) CreateVolume(ctx context.Context, req 
*csi.CreateVolumeRequest) } localCloud := d.cloud + localDiskController := d.diskController if diskParams.UserAgent != "" { - localCloud, err = azureutils.GetCloudProvider(ctx, d.kubeconfig, d.cloudConfigSecretName, d.cloudConfigSecretNamespace, diskParams.UserAgent, + localCloud, err = azureutils.GetCloudProviderFromClient(ctx, d.kubeClient, d.cloudConfigSecretName, d.cloudConfigSecretNamespace, diskParams.UserAgent, d.allowEmptyCloudConfig, d.enableTrafficManager, d.trafficManagerPort) if err != nil { return nil, status.Errorf(codes.Internal, "create cloud with UserAgent(%s) failed with: (%s)", diskParams.UserAgent, err) } + localDiskController = &ManagedDiskController{ + controllerCommon: &controllerCommon{ + cloud: localCloud, + lockMap: newLockMap(), + DisableDiskLunCheck: true, + clientFactory: localCloud.ComputeClientFactory, + ForceDetachBackoff: d.forceDetachBackoff, + }, + } + localDiskController.DisableUpdateCache = d.disableUpdateCache + localDiskController.AttachDetachInitialDelayInMs = int(d.attachDetachInitialDelayInMs) + } if azureutils.IsAzureStackCloud(localCloud.Config.Cloud, localCloud.Config.DisableAzureStackCloud) { if diskParams.MaxShares > 1 { @@ -144,7 +167,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) if _, err := azureutils.NormalizeCachingMode(diskParams.CachingMode); err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } - if skuName == compute.PremiumV2LRS { + if skuName == armcompute.DiskStorageAccountTypesPremiumV2LRS { // PremiumV2LRS only supports None caching mode azureutils.SetKeyValueInMap(diskParams.VolumeContext, consts.CachingModeField, string(v1.AzureDataDiskCachingNone)) } @@ -163,31 +186,8 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) return nil, status.Error(codes.InvalidArgument, err.Error()) } - requirement := req.GetAccessibilityRequirements() - diskZone := azureutils.PickAvailabilityZone(requirement, diskParams.Location, topologyKey) + diskZone := azureutils.PickAvailabilityZone(req.GetAccessibilityRequirements(), diskParams.Location, topologyKey) accessibleTopology := []*csi.Topology{} - if skuName == compute.StandardSSDZRS || skuName == compute.PremiumZRS { - klog.V(2).Infof("diskZone(%s) is reset as empty since disk(%s) is ZRS(%s)", diskZone, diskParams.DiskName, skuName) - diskZone = "" - // make volume scheduled on all 3 availability zones - for i := 1; i <= 3; i++ { - topology := &csi.Topology{ - Segments: map[string]string{topologyKey: fmt.Sprintf("%s-%d", diskParams.Location, i)}, - } - accessibleTopology = append(accessibleTopology, topology) - } - // make volume scheduled on all non-zone nodes - topology := &csi.Topology{ - Segments: map[string]string{topologyKey: ""}, - } - accessibleTopology = append(accessibleTopology, topology) - } else { - accessibleTopology = []*csi.Topology{ - { - Segments: map[string]string{topologyKey: diskZone}, - }, - } - } if d.enableDiskCapacityCheck { if ok, err := d.checkDiskCapacity(ctx, diskParams.SubscriptionID, diskParams.ResourceGroup, diskParams.DiskName, requestGiB); !ok { @@ -195,16 +195,13 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) } } - klog.V(2).Infof("begin to create azure disk(%s) account type(%s) rg(%s) location(%s) size(%d) diskZone(%v) maxShares(%d)", - diskParams.DiskName, skuName, diskParams.ResourceGroup, diskParams.Location, requestGiB, diskZone, diskParams.MaxShares) - contentSource := &csi.VolumeContentSource{} if 
strings.EqualFold(diskParams.WriteAcceleratorEnabled, consts.TrueValue) { diskParams.Tags[azure.WriteAcceleratorEnabled] = consts.TrueValue } - sourceID := "" - sourceType := "" + var sourceID, sourceType string + metricsRequest := "controller_create_volume" content := req.GetVolumeContentSource() if content != nil { if content.GetSnapshot() != nil { @@ -217,6 +214,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) }, }, } + metricsRequest = "controller_create_volume_from_snapshot" } else { sourceID = content.GetVolume().GetVolumeId() sourceType = consts.SourceVolume @@ -228,13 +226,52 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) }, } subsID := azureutils.GetSubscriptionIDFromURI(sourceID) - if sourceGiB, _ := d.GetSourceDiskSize(ctx, subsID, diskParams.ResourceGroup, path.Base(sourceID), 0, consts.SourceDiskSearchMaxDepth); sourceGiB != nil && *sourceGiB < int32(requestGiB) { - diskParams.VolumeContext[consts.ResizeRequired] = strconv.FormatBool(true) + sourceGiB, disk, err := d.GetSourceDiskSize(ctx, subsID, diskParams.ResourceGroup, path.Base(sourceID), 0, consts.SourceDiskSearchMaxDepth) + if err == nil { + if sourceGiB != nil && *sourceGiB < int32(requestGiB) { + diskParams.VolumeContext[consts.ResizeRequired] = strconv.FormatBool(true) + klog.V(2).Infof("source disk(%s) size(%d) is less than requested size(%d), set resizeRequired as true", sourceID, *sourceGiB, requestGiB) + } + if disk != nil && len(disk.Zones) == 1 { + if disk.Zones[0] != nil { + diskZone = fmt.Sprintf("%s-%s", diskParams.Location, *disk.Zones[0]) + klog.V(2).Infof("source disk(%s) is in zone(%s), set diskZone as %s", sourceID, *disk.Zones[0], diskZone) + } + } + } else { + klog.Warningf("failed to get source disk(%s) size, err: %v", sourceID, err) } + metricsRequest = "controller_create_volume_from_volume" + } + } + + if strings.HasSuffix(strings.ToLower(string(skuName)), "zrs") { + klog.V(2).Infof("diskZone(%s) is reset as empty since disk(%s) is ZRS(%s)", diskZone, diskParams.DiskName, skuName) + diskZone = "" + // make volume scheduled on all 3 availability zones + for i := 1; i <= 3; i++ { + topology := &csi.Topology{ + Segments: map[string]string{topologyKey: fmt.Sprintf("%s-%d", diskParams.Location, i)}, + } + accessibleTopology = append(accessibleTopology, topology) + } + // make volume scheduled on all non-zone nodes + topology := &csi.Topology{ + Segments: map[string]string{topologyKey: ""}, + } + accessibleTopology = append(accessibleTopology, topology) + } else { + accessibleTopology = []*csi.Topology{ + { + Segments: map[string]string{topologyKey: diskZone}, + }, } } - if skuName == compute.UltraSSDLRS { + klog.V(2).Infof("begin to create azure disk(%s) account type(%s) rg(%s) location(%s) size(%d) diskZone(%v) maxShares(%d)", + diskParams.DiskName, skuName, diskParams.ResourceGroup, diskParams.Location, requestGiB, diskZone, diskParams.MaxShares) + + if skuName == armcompute.DiskStorageAccountTypesUltraSSDLRS { if diskParams.DiskIOPSReadWrite == "" && diskParams.DiskMBPSReadWrite == "" { // set default DiskIOPSReadWrite, DiskMBPSReadWrite per request size diskParams.DiskIOPSReadWrite = strconv.Itoa(getDefaultDiskIOPSReadWrite(requestGiB)) @@ -244,7 +281,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) } diskParams.VolumeContext[consts.RequestedSizeGib] = strconv.Itoa(requestGiB) - volumeOptions := &azure.ManagedDiskOptions{ + volumeOptions := &ManagedDiskOptions{ AvailabilityZone: diskZone, 
BurstingEnabled: diskParams.EnableBursting, DiskEncryptionSetID: diskParams.DiskEncryptionSetID, @@ -276,13 +313,13 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) } var diskURI string - mc := metrics.NewMetricContext(consts.AzureDiskCSIDriverName, "controller_create_volume", d.cloud.ResourceGroup, d.cloud.SubscriptionID, d.Name) + mc := metrics.NewMetricContext(consts.AzureDiskCSIDriverName, metricsRequest, d.cloud.ResourceGroup, d.cloud.SubscriptionID, d.Name) isOperationSucceeded := false defer func() { mc.ObserveOperationWithResult(isOperationSucceeded, consts.VolumeID, diskURI) }() - diskURI, err = localCloud.CreateManagedDisk(ctx, volumeOptions) + diskURI, err = localDiskController.CreateManagedDisk(ctx, volumeOptions) if err != nil { if strings.Contains(err.Error(), consts.NotFound) { return nil, status.Error(codes.NotFound, err.Error()) @@ -333,7 +370,7 @@ func (d *Driver) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) }() klog.V(2).Infof("deleting azure disk(%s)", diskURI) - err := d.cloud.DeleteManagedDisk(ctx, diskURI) + err := d.diskController.DeleteManagedDisk(ctx, diskURI) klog.V(2).Infof("delete azure disk(%s) returned with %v", diskURI, err) isOperationSucceeded = (err == nil) return &csi.DeleteVolumeResponse{}, err @@ -344,6 +381,11 @@ func (d *Driver) ControllerGetVolume(context.Context, *csi.ControllerGetVolumeRe return nil, status.Error(codes.Unimplemented, "") } +// ControllerModifyVolume modifies a volume +func (d *Driver) ControllerModifyVolume(context.Context, *csi.ControllerModifyVolumeRequest) (*csi.ControllerModifyVolumeResponse, error) { + return nil, status.Error(codes.Unimplemented, "") +} + // ControllerPublishVolume attaches an azure disk to the required node func (d *Driver) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { diskURI := req.GetVolumeId() @@ -362,8 +404,8 @@ func (d *Driver) ControllerPublishVolume(ctx context.Context, req *csi.Controlle return nil, status.Error(codes.InvalidArgument, "MaxShares value not supported") } - if !azureutils.IsValidVolumeCapabilities(caps, maxShares) { - return nil, status.Error(codes.InvalidArgument, "Volume capability not supported") + if err := azureutils.IsValidVolumeCapabilities(caps, maxShares); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) } disk, err := d.checkDiskExists(ctx, diskURI) @@ -388,7 +430,7 @@ func (d *Driver) ControllerPublishVolume(ctx context.Context, req *csi.Controlle mc.ObserveOperationWithResult(isOperationSucceeded, consts.VolumeID, diskURI, consts.Node, string(nodeName)) }() - lun, vmState, err := d.cloud.GetDiskLun(diskName, diskURI, nodeName) + lun, vmState, err := d.diskController.GetDiskLun(diskName, diskURI, nodeName) if err == cloudprovider.InstanceNotFound { return nil, status.Error(codes.NotFound, fmt.Sprintf("failed to get azure instance id for node %q (%v)", nodeName, err)) } @@ -408,30 +450,30 @@ func (d *Driver) ControllerPublishVolume(ctx context.Context, req *csi.Controlle if err == nil { if vmState != nil && strings.ToLower(*vmState) == "failed" { klog.Warningf("VM(%s) is in failed state, update VM first", nodeName) - if err := d.cloud.UpdateVM(ctx, nodeName); err != nil { + if err := d.diskController.UpdateVM(ctx, nodeName); err != nil { return nil, status.Errorf(codes.Internal, "update instance %q failed with %v", nodeName, err) } } // Volume is already attached to node.
klog.V(2).Infof("Attach operation is successful. volume %s is already attached to node %s at lun %d.", diskURI, nodeName, lun) } else { - if strings.Contains(strings.ToLower(err.Error()), strings.ToLower(consts.TooManyRequests)) || - strings.Contains(strings.ToLower(err.Error()), consts.ClientThrottled) { - return nil, status.Errorf(codes.Internal, err.Error()) + if !strings.Contains(err.Error(), azureconsts.CannotFindDiskLUN) { + return nil, status.Errorf(codes.Internal, "could not get disk lun for volume %s: %v", diskURI, err) } - var cachingMode compute.CachingTypes + var cachingMode armcompute.CachingTypes if cachingMode, err = azureutils.GetCachingMode(volumeContext); err != nil { return nil, status.Errorf(codes.Internal, err.Error()) } + + occupiedLuns := d.getOccupiedLunsFromNode(ctx, nodeName, diskURI) klog.V(2).Infof("Trying to attach volume %s to node %s", diskURI, nodeName) - asyncAttach := isAsyncAttachEnabled(d.enableAsyncAttach, volumeContext) attachDiskInitialDelay := azureutils.GetAttachDiskInitialDelay(volumeContext) if attachDiskInitialDelay > 0 { klog.V(2).Infof("attachDiskInitialDelayInMs is set to %d", attachDiskInitialDelay) - d.cloud.AttachDetachInitialDelayInMs = attachDiskInitialDelay + d.diskController.AttachDetachInitialDelayInMs = attachDiskInitialDelay } - lun, err = d.cloud.AttachDisk(ctx, asyncAttach, diskName, diskURI, nodeName, cachingMode, disk) + lun, err = d.diskController.AttachDisk(ctx, diskName, diskURI, nodeName, cachingMode, disk, occupiedLuns) if err == nil { klog.V(2).Infof("Attach operation successful: volume %s attached to node %s.", diskURI, nodeName) } else { @@ -442,15 +484,19 @@ func (d *Driver) ControllerPublishVolume(ctx context.Context, req *csi.Controlle return nil, err } klog.Warningf("volume %s is already attached to node %s, try detach first", diskURI, derr.CurrentNode) - if err = d.cloud.DetachDisk(ctx, diskName, diskURI, derr.CurrentNode); err != nil { + if err = d.diskController.DetachDisk(ctx, diskName, diskURI, derr.CurrentNode); err != nil { return nil, status.Errorf(codes.Internal, "Could not detach volume %s from node %s: %v", diskURI, derr.CurrentNode, err) } klog.V(2).Infof("Trying to attach volume %s to node %s again", diskURI, nodeName) - lun, err = d.cloud.AttachDisk(ctx, asyncAttach, diskName, diskURI, nodeName, cachingMode, disk) + lun, err = d.diskController.AttachDisk(ctx, diskName, diskURI, nodeName, cachingMode, disk, occupiedLuns) } if err != nil { klog.Errorf("Attach volume %s to instance %s failed with %v", diskURI, nodeName, err) - return nil, status.Errorf(codes.Internal, "Attach volume %s to instance %s failed with %v", diskURI, nodeName, err) + errMsg := fmt.Sprintf("Attach volume %s to instance %s failed with %v", diskURI, nodeName, err) + if len(errMsg) > maxErrMsgLength { + errMsg = errMsg[:maxErrMsgLength] + } + return nil, status.Errorf(codes.Internal, errMsg) } } klog.V(2).Infof("attach volume %s to node %s successfully", diskURI, nodeName) @@ -493,11 +539,16 @@ func (d *Driver) ControllerUnpublishVolume(ctx context.Context, req *csi.Control klog.V(2).Infof("Trying to detach volume %s from node %s", diskURI, nodeID) - if err := d.cloud.DetachDisk(ctx, diskName, diskURI, nodeName); err != nil { + if err := d.diskController.DetachDisk(ctx, diskName, diskURI, nodeName); err != nil { if strings.Contains(err.Error(), consts.ErrDiskNotFound) { klog.Warningf("volume %s already detached from node %s", diskURI, nodeID) } else { - return nil, status.Errorf(codes.Internal, "Could not detach volume %s from node %s: 
%v", diskURI, nodeID, err) + klog.Errorf("Could not detach volume %s from node %s: %v", diskURI, nodeID, err) + errMsg := fmt.Sprintf("Could not detach volume %s from node %s: %v", diskURI, nodeID, err) + if len(errMsg) > maxErrMsgLength { + errMsg = errMsg[:maxErrMsgLength] + } + return nil, status.Errorf(codes.Internal, errMsg) } } klog.V(2).Infof("detach volume %s from node %s successfully", diskURI, nodeID) @@ -524,8 +575,8 @@ func (d *Driver) ValidateVolumeCapabilities(ctx context.Context, req *csi.Valida return nil, status.Error(codes.InvalidArgument, "MaxShares value not supported") } - if !azureutils.IsValidVolumeCapabilities(volumeCapabilities, maxShares) { - return &csi.ValidateVolumeCapabilitiesResponse{Message: "VolumeCapabilities are invalid"}, nil + if err := azureutils.IsValidVolumeCapabilities(volumeCapabilities, maxShares); err != nil { + return &csi.ValidateVolumeCapabilitiesResponse{Message: err.Error()}, nil } if _, err := d.checkDiskExists(ctx, diskURI); err != nil { @@ -538,15 +589,51 @@ func (d *Driver) ValidateVolumeCapabilities(ctx context.Context, req *csi.Valida }}, nil } +// getOccupiedLunsFromNode returns the occupied luns from node +func (d *Driver) getOccupiedLunsFromNode(ctx context.Context, nodeName types.NodeName, diskURI string) []int { + var occupiedLuns []int + if d.checkDiskLUNCollision && !d.isCheckDiskLunThrottled() { + timer := time.AfterFunc(checkDiskLunThrottleLatency, func() { + klog.Warningf("checkDiskLun(%s) on node %s took longer than %v, disable disk lun check temporarily", diskURI, nodeName, checkDiskLunThrottleLatency) + d.checkDiskLunThrottlingCache.Set(consts.CheckDiskLunThrottlingKey, "") + }) + now := time.Now() + if usedLunsFromVA, err := d.getUsedLunsFromVolumeAttachments(ctx, string(nodeName)); err == nil { + if len(usedLunsFromVA) > 0 { + if usedLunsFromNode, err := d.getUsedLunsFromNode(nodeName); err == nil { + occupiedLuns = volumehelper.GetElementsInArray1NotInArray2(usedLunsFromVA, usedLunsFromNode) + if len(occupiedLuns) > 0 { + klog.Warningf("node: %s, usedLuns from VolumeAttachments: %v, usedLuns from Node: %v, occupiedLuns: %v, disk: %s", nodeName, usedLunsFromVA, usedLunsFromNode, occupiedLuns, diskURI) + } else { + klog.V(6).Infof("node: %s, usedLuns from VolumeAttachments: %v, usedLuns from Node: %v, occupiedLuns: %v, disk: %s", nodeName, usedLunsFromVA, usedLunsFromNode, occupiedLuns, diskURI) + } + } else { + klog.Warningf("getUsedLunsFromNode(%s, %s) failed with %v", nodeName, diskURI, err) + } + } + } else { + klog.Warningf("getUsedLunsFromVolumeAttachments(%s, %s) failed with %v", nodeName, diskURI, err) + } + latency := time.Since(now) + if latency > checkDiskLunThrottleLatency { + klog.Warningf("checkDiskLun(%s) on node %s took %v (limit: %v)", diskURI, nodeName, latency, checkDiskLunThrottleLatency) + } else { + timer.Stop() // cancel the timer + klog.V(6).Infof("checkDiskLun(%s) on node %s took %v", diskURI, nodeName, latency) + } + } + return occupiedLuns +} + // ControllerGetCapabilities returns the capabilities of the Controller plugin -func (d *Driver) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) { +func (d *Driver) ControllerGetCapabilities(_ context.Context, _ *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) { return &csi.ControllerGetCapabilitiesResponse{ Capabilities: d.Cap, }, nil } // GetCapacity returns the capacity of the total available storage pool -func (d *Driver) 
GetCapacity(ctx context.Context, req *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) { +func (d *Driver) GetCapacity(_ context.Context, _ *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) { return nil, status.Error(codes.Unimplemented, "") } @@ -681,7 +768,8 @@ func (d *Driver) listVolumesInNodeResourceGroup(ctx context.Context, start, maxE // listVolumesByResourceGroup is a helper function that updates the ListVolumeResponse_Entry slice and returns number of total visited volumes, number of volumes that needs to be visited and an error if found func (d *Driver) listVolumesByResourceGroup(ctx context.Context, resourceGroup string, entries []*csi.ListVolumesResponse_Entry, start, maxEntries int, volSet map[string]bool) listVolumeStatus { - disks, derr := d.cloud.DisksClient.ListByResourceGroup(ctx, "", resourceGroup) + diskClient := d.clientFactory.GetDiskClient() + disks, derr := diskClient.List(ctx, resourceGroup) if derr != nil { return listVolumeStatus{err: status.Errorf(codes.Internal, "ListVolumes on rg(%s) failed with error: %v", resourceGroup, derr.Error())} } @@ -717,7 +805,7 @@ func (d *Driver) listVolumesByResourceGroup(ctx context.Context, resourceGroup s continue } // HyperVGeneration property is only setup for os disks. Only the non os disks should be included in the list - if disk.DiskProperties == nil || disk.DiskProperties.HyperVGeneration == "" { + if disk.Properties == nil || disk.Properties.HyperVGeneration == nil || *disk.Properties.HyperVGeneration == "" { nodeList := []string{} if disk.ManagedBy != nil { @@ -775,14 +863,18 @@ func (d *Driver) ControllerExpandVolume(ctx context.Context, req *csi.Controller } subsID := azureutils.GetSubscriptionIDFromURI(diskURI) - result, rerr := d.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) + diskClient, err := d.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return nil, status.Errorf(codes.Internal, "could not get disk client for subscription(%s) with error(%v)", subsID, err) + } + result, rerr := diskClient.Get(ctx, resourceGroup, diskName) if rerr != nil { return nil, status.Errorf(codes.Internal, "could not get the disk(%s) under rg(%s) with error(%v)", diskName, resourceGroup, rerr.Error()) } - if result.DiskProperties.DiskSizeGB == nil { + if result.Properties == nil || result.Properties.DiskSizeGB == nil { return nil, status.Errorf(codes.Internal, "could not get size of the disk(%s)", diskName) } - oldSize := *resource.NewQuantity(int64(*result.DiskProperties.DiskSizeGB), resource.BinarySI) + oldSize := *resource.NewQuantity(int64(*result.Properties.DiskSizeGB), resource.BinarySI) mc := metrics.NewMetricContext(consts.AzureDiskCSIDriverName, "controller_expand_volume", d.cloud.ResourceGroup, d.cloud.SubscriptionID, d.Name) isOperationSucceeded := false @@ -791,7 +883,7 @@ func (d *Driver) ControllerExpandVolume(ctx context.Context, req *csi.Controller }() klog.V(2).Infof("begin to expand azure disk(%s) with new size(%d)", diskURI, requestSize.Value()) - newSize, err := d.cloud.ResizeDisk(ctx, diskURI, oldSize, requestSize, d.enableDiskOnlineResize) + newSize, err := d.diskController.ResizeDisk(ctx, diskURI, oldSize, requestSize, d.enableDiskOnlineResize) if err != nil { return nil, status.Errorf(codes.Internal, "failed to resize disk(%s) with error(%v)", diskURI, err) } @@ -846,7 +938,7 @@ func (d *Driver) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequ location = v case consts.UserAgentField: newUserAgent := v - localCloud, err = 
azureutils.GetCloudProvider(ctx, d.kubeconfig, d.cloudConfigSecretName, d.cloudConfigSecretNamespace, newUserAgent, + localCloud, err = azureutils.GetCloudProviderFromClient(ctx, d.kubeClient, d.cloudConfigSecretName, d.cloudConfigSecretNamespace, newUserAgent, d.allowEmptyCloudConfig, d.enableTrafficManager, d.trafficManagerPort) if err != nil { return nil, status.Errorf(codes.Internal, "create cloud with UserAgent(%s) failed with: (%s)", newUserAgent, err) @@ -882,10 +974,10 @@ func (d *Driver) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequ tags[k] = &value } - snapshot := compute.Snapshot{ - SnapshotProperties: &compute.SnapshotProperties{ - CreationData: &compute.CreationData{ - CreateOption: compute.Copy, + snapshot := armcompute.Snapshot{ + Properties: &armcompute.SnapshotProperties{ + CreationData: &armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceResourceID: &sourceVolumeID, }, Incremental: &incremental, @@ -898,7 +990,7 @@ func (d *Driver) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequ if err := azureutils.ValidateDataAccessAuthMode(dataAccessAuthMode); err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } - snapshot.SnapshotProperties.DataAccessAuthMode = compute.DataAccessAuthMode(dataAccessAuthMode) + snapshot.Properties.DataAccessAuthMode = to.Ptr(armcompute.DataAccessAuthMode(dataAccessAuthMode)) } if acquired := d.volumeLocks.TryAcquire(snapshotName); !acquired { @@ -916,20 +1008,34 @@ func (d *Driver) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequ } } - mc := metrics.NewMetricContext(consts.AzureDiskCSIDriverName, "controller_create_snapshot", d.cloud.ResourceGroup, d.cloud.SubscriptionID, d.Name) + metricsRequest := "controller_create_snapshot" + if crossRegionSnapshotName != "" { + metricsRequest = "controller_create_snapshot_cross_region" + } + mc := metrics.NewMetricContext(consts.AzureDiskCSIDriverName, metricsRequest, d.cloud.ResourceGroup, d.cloud.SubscriptionID, d.Name) isOperationSucceeded := false defer func() { mc.ObserveOperationWithResult(isOperationSucceeded, consts.SourceResourceID, sourceVolumeID, consts.SnapshotName, snapshotName) }() klog.V(2).Infof("begin to create snapshot(%s, incremental: %v) under rg(%s) region(%s)", snapshotName, incremental, resourceGroup, d.cloud.Location) - if rerr := d.cloud.SnapshotsClient.CreateOrUpdate(ctx, subsID, resourceGroup, snapshotName, snapshot); rerr != nil { - if strings.Contains(rerr.Error().Error(), "existing disk") { - return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("request snapshot(%s) under rg(%s) already exists, but the SourceVolumeId is different, error details: %v", snapshotName, resourceGroup, rerr.Error())) + snapshotClient, err := d.clientFactory.GetSnapshotClientForSub(subsID) + if err != nil { + return nil, status.Errorf(codes.Internal, "could not get snapshot client for subscription(%s) with error(%v)", subsID, err) + } + if _, err := snapshotClient.CreateOrUpdate(ctx, resourceGroup, snapshotName, snapshot); err != nil { + if strings.Contains(err.Error(), "existing disk") { + return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("request snapshot(%s) under rg(%s) already exists, but the SourceVolumeId is different, error details: %v", snapshotName, resourceGroup, err)) } - azureutils.SleepIfThrottled(rerr.Error(), consts.SnapshotOpThrottlingSleepSec) - return nil, status.Error(codes.Internal, fmt.Sprintf("create snapshot error: %v", rerr.Error())) + 
azureutils.SleepIfThrottled(err, consts.SnapshotOpThrottlingSleepSec) + return nil, status.Error(codes.Internal, fmt.Sprintf("create snapshot error: %v", err.Error())) + } + + if d.shouldWaitForSnapshotReady { + if err := d.waitForSnapshotReady(ctx, subsID, resourceGroup, snapshotName, waitForSnapshotReadyInterval, waitForSnapshotReadyTimeout); err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("waitForSnapshotReady(%s, %s, %s) failed with %v", subsID, resourceGroup, snapshotName, err)) + } } klog.V(2).Infof("create snapshot(%s) under rg(%s) region(%s) successfully", snapshotName, resourceGroup, d.cloud.Location) @@ -940,35 +1046,35 @@ func (d *Driver) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequ if crossRegionSnapshotName != "" { copySnapshot := snapshot - if copySnapshot.SnapshotProperties == nil { - copySnapshot.SnapshotProperties = &compute.SnapshotProperties{} + if copySnapshot.Properties == nil { + copySnapshot.Properties = &armcompute.SnapshotProperties{} } - if copySnapshot.SnapshotProperties.CreationData == nil { - copySnapshot.SnapshotProperties.CreationData = &compute.CreationData{} + if copySnapshot.Properties.CreationData == nil { + copySnapshot.Properties.CreationData = &armcompute.CreationData{} } - copySnapshot.SnapshotProperties.CreationData.SourceResourceID = &csiSnapshot.SnapshotId - copySnapshot.SnapshotProperties.CreationData.CreateOption = compute.CopyStart + copySnapshot.Properties.CreationData.SourceResourceID = &csiSnapshot.SnapshotId + copySnapshot.Properties.CreationData.CreateOption = to.Ptr(armcompute.DiskCreateOptionCopyStart) copySnapshot.Location = &location klog.V(2).Infof("begin to create snapshot(%s, incremental: %v) under rg(%s) region(%s)", crossRegionSnapshotName, incremental, resourceGroup, location) - if rerr := d.cloud.SnapshotsClient.CreateOrUpdate(ctx, subsID, resourceGroup, crossRegionSnapshotName, copySnapshot); rerr != nil { - if strings.Contains(rerr.Error().Error(), "existing disk") { - return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("request snapshot(%s) under rg(%s) already exists, but the SourceVolumeId is different, error details: %v", crossRegionSnapshotName, resourceGroup, rerr.Error())) + if _, err := snapshotClient.CreateOrUpdate(ctx, resourceGroup, crossRegionSnapshotName, copySnapshot); err != nil { + if strings.Contains(err.Error(), "existing disk") { + return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("request snapshot(%s) under rg(%s) already exists, but the SourceVolumeId is different, error details: %v", crossRegionSnapshotName, resourceGroup, err)) } - azureutils.SleepIfThrottled(rerr.Error(), consts.SnapshotOpThrottlingSleepSec) - return nil, status.Error(codes.Internal, fmt.Sprintf("create snapshot error: %v", rerr.Error())) + azureutils.SleepIfThrottled(err, consts.SnapshotOpThrottlingSleepSec) + return nil, status.Error(codes.Internal, fmt.Sprintf("create snapshot error: %v", err)) } klog.V(2).Infof("create snapshot(%s) under rg(%s) region(%s) successfully", crossRegionSnapshotName, resourceGroup, location) - if err := d.waitForSnapshotCopy(ctx, subsID, resourceGroup, crossRegionSnapshotName, waitForSnapshotCopyInterval, waitForSnapshotCopyTimeout); err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("failed to wait for snapshot copy cross region: %v", err.Error())) + if err := d.waitForSnapshotReady(ctx, subsID, resourceGroup, crossRegionSnapshotName, waitForSnapshotReadyInterval, waitForSnapshotReadyTimeout); err != nil { + return nil, 
status.Error(codes.Internal, fmt.Sprintf("waitForSnapshotReady(%s, %s, %s) failed with %v", subsID, resourceGroup, crossRegionSnapshotName, err)) } klog.V(2).Infof("begin to delete snapshot(%s) under rg(%s) region(%s)", snapshotName, resourceGroup, d.cloud.Location) - if rerr := d.cloud.SnapshotsClient.Delete(ctx, subsID, resourceGroup, snapshotName); rerr != nil { - klog.Errorf("delete snapshot error: %v", rerr.Error()) - azureutils.SleepIfThrottled(rerr.Error(), consts.SnapshotOpThrottlingSleepSec) + if err = snapshotClient.Delete(ctx, resourceGroup, snapshotName); err != nil { + klog.Errorf("delete snapshot error: %v", err) + azureutils.SleepIfThrottled(err, consts.SnapshotOpThrottlingSleepSec) } else { klog.V(2).Infof("delete snapshot(%s) under rg(%s) region(%s) successfully", snapshotName, resourceGroup, d.cloud.Location) } @@ -1012,9 +1118,13 @@ func (d *Driver) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequ }() klog.V(2).Infof("begin to delete snapshot(%s) under rg(%s)", snapshotName, resourceGroup) - if rerr := d.cloud.SnapshotsClient.Delete(ctx, subsID, resourceGroup, snapshotName); rerr != nil { - azureutils.SleepIfThrottled(rerr.Error(), consts.SnapshotOpThrottlingSleepSec) - return nil, status.Error(codes.Internal, fmt.Sprintf("delete snapshot error: %v", rerr.Error())) + snapshotClient, err := d.clientFactory.GetSnapshotClientForSub(subsID) + if err != nil { + return nil, status.Errorf(codes.Internal, "could not get snapshot client for subscription(%s) with error(%v)", subsID, err) + } + if err := snapshotClient.Delete(ctx, resourceGroup, snapshotName); err != nil { + azureutils.SleepIfThrottled(err, consts.SnapshotOpThrottlingSleepSec) + return nil, status.Error(codes.Internal, fmt.Sprintf("delete snapshot error: %v", err)) } klog.V(2).Infof("delete snapshot(%s) under rg(%s) successfully", snapshotName, resourceGroup) isOperationSucceeded = true @@ -1042,9 +1152,9 @@ func (d *Driver) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsReques } return listSnapshotResp, nil } - + snapshotClient := d.clientFactory.GetSnapshotClient() // no SnapshotId is set, return all snapshots that satisfy the request. 
- snapshots, err := d.cloud.SnapshotsClient.ListByResourceGroup(ctx, "", d.cloud.ResourceGroup) + snapshots, err := snapshotClient.List(ctx, d.cloud.ResourceGroup) if err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown list snapshot error: %v", err.Error())) } @@ -1061,40 +1171,47 @@ func (d *Driver) getSnapshotByID(ctx context.Context, subsID, resourceGroup, sna return nil, status.Errorf(codes.Internal, err.Error()) } } - - snapshot, rerr := d.cloud.SnapshotsClient.Get(ctx, subsID, resourceGroup, snapshotName) - if rerr != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("get snapshot %s from rg(%s) error: %v", snapshotName, resourceGroup, rerr.Error())) + snapshotClient, err := d.clientFactory.GetSnapshotClientForSub(subsID) + if err != nil { + return nil, status.Errorf(codes.Internal, "could not get snapshot client for subscription(%s) with error(%v)", subsID, err) + } + snapshot, err := snapshotClient.Get(ctx, resourceGroup, snapshotName) + if err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("get snapshot %s from rg(%s) error: %v", snapshotName, resourceGroup, err)) } - return azureutils.GenerateCSISnapshot(sourceVolumeID, &snapshot) + return azureutils.GenerateCSISnapshot(sourceVolumeID, snapshot) } -// GetSourceDiskSize recursively searches for the sourceDisk and returns: sourceDisk disk size, error +// GetSourceDiskSize recursively searches for the source disk and returns the source disk size, the source disk object, and an error -func (d *Driver) GetSourceDiskSize(ctx context.Context, subsID, resourceGroup, diskName string, curDepth, maxDepth int) (*int32, error) { +func (d *Driver) GetSourceDiskSize(ctx context.Context, subsID, resourceGroup, diskName string, curDepth, maxDepth int) (*int32, *armcompute.Disk, error) { if curDepth > maxDepth { - return nil, status.Error(codes.Internal, fmt.Sprintf("current depth (%d) surpassed the max depth (%d) while searching for the source disk size", curDepth, maxDepth)) + return nil, nil, status.Error(codes.Internal, fmt.Sprintf("current depth (%d) surpassed the max depth (%d) while searching for the source disk size", curDepth, maxDepth)) } - result, rerr := d.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) - if rerr != nil { - return nil, rerr.Error() + diskClient, err := d.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return nil, nil, status.Error(codes.Internal, err.Error()) } - if result.DiskProperties == nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("DiskProperty not found for disk (%s) in resource group (%s)", diskName, resourceGroup)) + result, err := diskClient.Get(ctx, resourceGroup, diskName) + if err != nil { + return nil, result, err + } + if result.Properties == nil { + return nil, result, status.Error(codes.Internal, fmt.Sprintf("DiskProperty not found for disk (%s) in resource group (%s)", diskName, resourceGroup)) } - if result.DiskProperties.CreationData != nil && (*result.DiskProperties.CreationData).CreateOption == "Copy" { + if result.Properties.CreationData != nil && result.Properties.CreationData.CreateOption != nil && *result.Properties.CreationData.CreateOption == armcompute.DiskCreateOptionCopy { klog.V(2).Infof("Clone source disk has a parent source") - sourceResourceID := *result.DiskProperties.CreationData.SourceResourceID + sourceResourceID := *result.Properties.CreationData.SourceResourceID parentResourceGroup, _ := azureutils.GetResourceGroupFromURI(sourceResourceID) parentDiskName := path.Base(sourceResourceID) return d.GetSourceDiskSize(ctx, subsID, parentResourceGroup, parentDiskName, curDepth+1, maxDepth) } - if
(*result.DiskProperties).DiskSizeGB == nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("DiskSizeGB for disk (%s) in resourcegroup (%s) is nil", diskName, resourceGroup)) + if (*result.Properties).DiskSizeGB == nil { + return nil, result, status.Error(codes.Internal, fmt.Sprintf("DiskSizeGB for disk (%s) in resourcegroup (%s) is nil", diskName, resourceGroup)) } - return (*result.DiskProperties).DiskSizeGB, nil + return (*result.Properties).DiskSizeGB, result, nil } // The format of snapshot id is /subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/snapshot-xxx-xxx. @@ -1110,18 +1227,3 @@ func (d *Driver) getSnapshotInfo(snapshotID string) (snapshotName, resourceGroup } return snapshotName, resourceGroup, subsID, err } - -func isAsyncAttachEnabled(defaultValue bool, volumeContext map[string]string) bool { - for k, v := range volumeContext { - switch strings.ToLower(k) { - case consts.EnableAsyncAttachField: - if strings.EqualFold(v, consts.TrueValue) { - return true - } - if strings.EqualFold(v, consts.FalseValue) { - return false - } - } - } - return defaultValue -} diff --git a/pkg/azuredisk/controllerserver_test.go b/pkg/azuredisk/controllerserver_test.go index f6868ffcc..6547d0f6e 100644 --- a/pkg/azuredisk/controllerserver_test.go +++ b/pkg/azuredisk/controllerserver_test.go @@ -21,12 +21,14 @@ import ( "fmt" "reflect" "testing" + "time" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - "github.com/Azure/go-autorest/autorest/date" "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/timestamppb" @@ -37,8 +39,9 @@ import ( "sigs.k8s.io/azuredisk-csi-driver/pkg/azuredisk/mockkubeclient" "sigs.k8s.io/azuredisk-csi-driver/pkg/azuredisk/mockpersistentvolume" volumehelper "sigs.k8s.io/azuredisk-csi-driver/pkg/util" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/mock_diskclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/mock_snapshotclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient" azure "sigs.k8s.io/cloud-provider-azure/pkg/provider" "sigs.k8s.io/cloud-provider-azure/pkg/retry" @@ -67,7 +70,9 @@ func TestCreateVolume(t *testing.T) { { name: " invalid ", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) d.setControllerCapabilities([]*csi.ControllerServiceCapability{}) req := &csi.CreateVolumeRequest{} @@ -81,7 +86,9 @@ func TestCreateVolume(t *testing.T) { { name: " volume name missing", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) req := &csi.CreateVolumeRequest{} _, err := d.CreateVolume(context.Background(), req) expectedErr := status.Error(codes.InvalidArgument, "CreateVolume Name must be provided") @@ -93,7 +100,9 @@ func TestCreateVolume(t *testing.T) { { name: "volume capabilities missing", testFunc: func(t 
*testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) req := &csi.CreateVolumeRequest{ Name: "unit-test", } @@ -107,7 +116,9 @@ func TestCreateVolume(t *testing.T) { { name: "require volume size exceed", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) stdCapacityRange = &csi.CapacityRange{ RequiredBytes: volumehelper.GiBToBytes(15), LimitBytes: volumehelper.GiBToBytes(10), @@ -127,7 +138,9 @@ func TestCreateVolume(t *testing.T) { { name: "logical sector size parse error", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) mp := make(map[string]string) mp[consts.LogicalSectorSizeField] = "aaa" req := &csi.CreateVolumeRequest{ @@ -145,7 +158,9 @@ func TestCreateVolume(t *testing.T) { { name: "maxshare parse error ", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) mp := make(map[string]string) mp[consts.MaxSharesField] = "aaa" req := &csi.CreateVolumeRequest{ @@ -163,7 +178,9 @@ func TestCreateVolume(t *testing.T) { { name: "maxshare invalid value ", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) mp := make(map[string]string) mp[consts.MaxSharesField] = "0" req := &csi.CreateVolumeRequest{ @@ -181,7 +198,9 @@ func TestCreateVolume(t *testing.T) { { name: "invalid perf profile", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) mp := make(map[string]string) mp[consts.PerfProfileField] = "blah" req := &csi.CreateVolumeRequest{ @@ -199,7 +218,9 @@ func TestCreateVolume(t *testing.T) { { name: "Volume capability not supported ", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) mp := make(map[string]string) mp[consts.MaxSharesField] = "1" mp[consts.SkuNameField] = "ut" @@ -217,7 +238,7 @@ func TestCreateVolume(t *testing.T) { Parameters: mp, } _, err := d.CreateVolume(context.Background(), req) - expectedErr := status.Error(codes.InvalidArgument, "Volume capability not supported") + expectedErr := status.Error(codes.InvalidArgument, "mountVolume is not supported for access mode: MULTI_NODE_MULTI_WRITER") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) } @@ -226,7 +247,9 @@ func TestCreateVolume(t *testing.T) { { name: "normalize storageaccounttype error ", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) mp := make(map[string]string) mp[consts.StorageAccountTypeField] = "NOT_EXISTING" req := &csi.CreateVolumeRequest{ @@ -244,7 +267,9 @@ func TestCreateVolume(t *testing.T) { { name: "normalize cache mode error ", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) mp := make(map[string]string) mp[consts.CachingModeField] = "WriteOnly" req := &csi.CreateVolumeRequest{ @@ -262,7 +287,9 @@ func TestCreateVolume(t *testing.T) { { name: "custom tags error ", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := 
gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) mp := make(map[string]string) mp["tags"] = "unit-test" req := &csi.CreateVolumeRequest{ @@ -270,10 +297,12 @@ func TestCreateVolume(t *testing.T) { VolumeCapabilities: createVolumeCapabilities(csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER), Parameters: mp, } - disk := compute.Disk{ - DiskProperties: &compute.DiskProperties{}, + disk := &armcompute.Disk{ + Properties: &armcompute.DiskProperties{}, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() _, err := d.CreateVolume(context.Background(), req) expectedErr := status.Error(codes.InvalidArgument, "Failed parsing disk parameters: Tags 'unit-test' are invalid, the format should like: 'key1=value1,key2=value2'") if !reflect.DeepEqual(err, expectedErr) { @@ -284,7 +313,9 @@ func TestCreateVolume(t *testing.T) { { name: "create managed disk error ", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) mp := make(map[string]string) mp["tags"] = "unit=test" volumeSnapshotSource := &csi.VolumeContentSource_SnapshotSource{ @@ -302,16 +333,15 @@ func TestCreateVolume(t *testing.T) { Parameters: mp, VolumeContentSource: &volumecontensource, } - disk := compute.Disk{ - DiskProperties: &compute.DiskProperties{}, + disk := &armcompute.Disk{ + Properties: &armcompute.DiskProperties{}, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() - rerr := &retry.Error{ - RawError: fmt.Errorf("test"), - } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(rerr).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("test")).AnyTimes() _, err := d.CreateVolume(context.Background(), req) - expectedErr := status.Errorf(codes.Internal, "Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: test") + expectedErr := status.Errorf(codes.Internal, "test") if err.Error() != expectedErr.Error() { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) } @@ -320,7 +350,9 @@ func TestCreateVolume(t *testing.T) { { name: "create managed disk not found error ", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) mp := make(map[string]string) volumeContentSourceSnapshotSource := &csi.VolumeContentSource_Snapshot{} volumecontensource := csi.VolumeContentSource{ @@ -332,16 +364,15 @@ func TestCreateVolume(t *testing.T) { Parameters: mp, VolumeContentSource: &volumecontensource, } - disk := compute.Disk{ - DiskProperties: &compute.DiskProperties{}, - } 
- d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() - rerr := &retry.Error{ - RawError: fmt.Errorf(consts.NotFound), + disk := &armcompute.Disk{ + Properties: &armcompute.DiskProperties{}, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(rerr).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(consts.NotFound)).AnyTimes() _, err := d.CreateVolume(context.Background(), req) - expectedErr := status.Error(codes.NotFound, "Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: NotFound") + expectedErr := status.Error(codes.NotFound, "NotFound") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) } @@ -350,7 +381,9 @@ func TestCreateVolume(t *testing.T) { { name: "valid request ZRS", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) mp := make(map[string]string) mp[consts.SkuNameField] = "StandardSSD_ZRS" stdCapacityRangetest := &csi.CapacityRange{ @@ -366,16 +399,18 @@ func TestCreateVolume(t *testing.T) { size := int32(volumehelper.BytesToGiB(req.CapacityRange.RequiredBytes)) id := fmt.Sprintf(consts.ManagedDiskPath, "subs", "rg", testVolumeName) state := "Succeeded" - disk := compute.Disk{ + disk := &armcompute.Disk{ ID: &id, Name: &testVolumeName, - DiskProperties: &compute.DiskProperties{ + Properties: &armcompute.DiskProperties{ DiskSizeGB: &size, ProvisioningState: &state, }, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() _, err := d.CreateVolume(context.Background(), req) expectedErr := error(nil) if !reflect.DeepEqual(err, expectedErr) { @@ -386,7 +421,9 @@ func TestCreateVolume(t *testing.T) { { name: "valid request", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) stdCapacityRangetest := &csi.CapacityRange{ RequiredBytes: volumehelper.GiBToBytes(10), LimitBytes: volumehelper.GiBToBytes(15), @@ -399,16 +436,18 @@ func TestCreateVolume(t *testing.T) { size := int32(volumehelper.BytesToGiB(req.CapacityRange.RequiredBytes)) id := fmt.Sprintf(consts.ManagedDiskPath, "subs", "rg", testVolumeName) state := "Succeeded" - disk := compute.Disk{ + disk := &armcompute.Disk{ ID: &id, Name: 
&testVolumeName, - DiskProperties: &compute.DiskProperties{ + Properties: &armcompute.DiskProperties{ DiskSizeGB: &size, ProvisioningState: &state, }, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() _, err := d.CreateVolume(context.Background(), req) expectedErr := error(nil) if !reflect.DeepEqual(err, expectedErr) { @@ -419,7 +458,9 @@ func TestCreateVolume(t *testing.T) { { name: "invalid parameter", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) stdCapacityRangetest := &csi.CapacityRange{ RequiredBytes: volumehelper.GiBToBytes(10), LimitBytes: volumehelper.GiBToBytes(15), @@ -440,7 +481,9 @@ func TestCreateVolume(t *testing.T) { { name: "[Failure] advanced perfProfile fails if no device settings provided", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) d.setPerfOptimizationEnabled(true) stdCapacityRangetest := &csi.CapacityRange{ RequiredBytes: volumehelper.GiBToBytes(10), @@ -459,6 +502,45 @@ func TestCreateVolume(t *testing.T) { } }, }, + { + name: "valid PerformancePlus request, disk resizes to min required size", + testFunc: func(t *testing.T) { + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) + stdCapacityRangetest := &csi.CapacityRange{ + RequiredBytes: volumehelper.GiBToBytes(10), + LimitBytes: volumehelper.GiBToBytes(514), + } + req := &csi.CreateVolumeRequest{ + Name: testVolumeName, + VolumeCapabilities: stdVolumeCapabilities, + CapacityRange: stdCapacityRangetest, + Parameters: map[string]string{consts.PerformancePlusField: "true"}, + } + size := int32(volumehelper.BytesToGiB(req.CapacityRange.RequiredBytes)) + id := fmt.Sprintf(consts.ManagedDiskPath, "subs", "rg", testVolumeName) + state := "Succeeded" + disk := &armcompute.Disk{ + ID: &id, + Name: &testVolumeName, + Properties: &armcompute.DiskProperties{ + DiskSizeGB: &size, + ProvisioningState: &state, + }, + } + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + res, err := d.CreateVolume(context.Background(), req) + assert.Equal(t, res.Volume.CapacityBytes, volumehelper.GiBToBytes(consts.PerformancePlusMinimumDiskSizeGiB)) + expectedErr := error(nil) + if !reflect.DeepEqual(err, expectedErr) { + t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) + } + }, + }, } for _, tc := range testCases { t.Run(tc.name, tc.testFunc) @@ -466,7 +548,9 @@ func 
TestCreateVolume(t *testing.T) { } func TestDeleteVolume(t *testing.T) { - d, err := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, err := NewFakeDriver(cntl) if err != nil { t.Fatalf("Error getting driver: %v", err) } @@ -505,12 +589,13 @@ func TestDeleteVolume(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) defer cancel() id := test.req.VolumeId - disk := compute.Disk{ + disk := &armcompute.Disk{ ID: &id, } - - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Eq(ctx), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Delete(gomock.Eq(ctx), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Eq(ctx), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient.EXPECT().Delete(gomock.Eq(ctx), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() result, err := d.DeleteVolume(ctx, test.req) if err != nil { @@ -523,7 +608,9 @@ func TestDeleteVolume(t *testing.T) { } func TestControllerGetVolume(t *testing.T) { - d, err := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, err := NewFakeDriver(cntl) if err != nil { t.Fatalf("Error getting driver: %v", err) } @@ -536,7 +623,9 @@ func TestControllerGetVolume(t *testing.T) { } func TestGetSnapshotInfo(t *testing.T) { - d, err := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, err := NewFakeDriver(cntl) if err != nil { t.Fatalf("Error getting driver: %v", err) } @@ -591,7 +680,9 @@ func TestControllerPublishVolume(t *testing.T) { volumeCapWrong := &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{Mount: &csi.VolumeCapability_MountVolume{}}, AccessMode: &csi.VolumeCapability_AccessMode{Mode: 10}} - d, err := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, err := NewFakeDriver(cntl) nodeName := "unit-test-node" //d.setCloud(&azure.Cloud{}) if err != nil { @@ -604,7 +695,9 @@ func TestControllerPublishVolume(t *testing.T) { { name: "Volume ID missing", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) req := &csi.ControllerPublishVolumeRequest{} expectedErr := status.Error(codes.InvalidArgument, "Volume ID not provided") _, err := d.ControllerPublishVolume(context.Background(), req) @@ -616,7 +709,9 @@ func TestControllerPublishVolume(t *testing.T) { { name: "Volume capability missing", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) req := &csi.ControllerPublishVolumeRequest{ VolumeId: "vol_1", } @@ -630,12 +725,14 @@ func TestControllerPublishVolume(t *testing.T) { { name: "Volume capability not supported", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) req := &csi.ControllerPublishVolumeRequest{ VolumeId: "vol_1", VolumeCapability: volumeCapWrong, } - expectedErr := status.Error(codes.InvalidArgument, "Volume capability not supported") + expectedErr := status.Error(codes.InvalidArgument, "invalid access mode: [mount:<> access_mode: ]") _, err := 
d.ControllerPublishVolume(context.Background(), req) if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) @@ -664,14 +761,14 @@ func TestControllerPublishVolume(t *testing.T) { VolumeCapability: volumeCap, } id := req.VolumeId - disk := compute.Disk{ + disk := &armcompute.Disk{ ID: &id, } ctrl := gomock.NewController(t) defer ctrl.Finish() - mockDiskClient := mockdiskclient.NewMockInterface(ctrl) - d.getCloud().DisksClient = mockDiskClient - mockDiskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() expectedErr := status.Error(codes.InvalidArgument, "Node ID not provided") _, err := d.ControllerPublishVolume(context.Background(), req) @@ -689,14 +786,14 @@ func TestControllerPublishVolume(t *testing.T) { NodeId: nodeName, } id := req.VolumeId - disk := compute.Disk{ + disk := &armcompute.Disk{ ID: &id, } ctrl := gomock.NewController(t) defer ctrl.Finish() - mockDiskClient := mockdiskclient.NewMockInterface(ctrl) - d.getCloud().DisksClient = mockDiskClient - mockDiskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() instanceID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/%s", nodeName) vm := compute.VirtualMachine{ Name: &nodeName, @@ -740,21 +837,26 @@ func TestControllerPublishVolume(t *testing.T) { { name: "Volume already attached success", testFunc: func(t *testing.T) { - d, err = NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, err := NewFakeDriver(cntl) + if err != nil { + t.Fatalf("Error getting driver: %v", err) + } req := &csi.ControllerPublishVolumeRequest{ VolumeId: testVolumeID, VolumeCapability: volumeCap, NodeId: nodeName, } id := req.VolumeId - disk := compute.Disk{ + disk := &armcompute.Disk{ ID: &id, } ctrl := gomock.NewController(t) defer ctrl.Finish() - mockDiskClient := mockdiskclient.NewMockInterface(ctrl) - d.getCloud().DisksClient = mockDiskClient - mockDiskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() instanceID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/%s", nodeName) vm := compute.VirtualMachine{ Name: &nodeName, @@ -786,7 +888,7 @@ func TestControllerPublishVolume(t *testing.T) { vm.StorageProfile.DataDisks = &dataDisks mockVMsClient := d.getCloud().VirtualMachinesClient.(*mockvmclient.MockInterface) mockVMsClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(vm, nil).AnyTimes() - _, err := 
d.ControllerPublishVolume(context.Background(), req) + _, err = d.ControllerPublishVolume(context.Background(), req) if !reflect.DeepEqual(err, nil) { t.Errorf("actualErr: (%v), expectedErr: ()", err) } @@ -795,7 +897,12 @@ func TestControllerPublishVolume(t *testing.T) { { name: "CachingMode Error", testFunc: func(t *testing.T) { - d, err = NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, err := NewFakeDriver(cntl) + if err != nil { + t.Fatalf("Error getting driver: %v", err) + } volumeContext := make(map[string]string) volumeContext[consts.CachingModeField] = "badmode" req := &csi.ControllerPublishVolumeRequest{ @@ -805,14 +912,14 @@ func TestControllerPublishVolume(t *testing.T) { VolumeContext: volumeContext, } id := req.VolumeId - disk := compute.Disk{ + disk := &armcompute.Disk{ ID: &id, } ctrl := gomock.NewController(t) defer ctrl.Finish() - mockDiskClient := mockdiskclient.NewMockInterface(ctrl) - d.getCloud().DisksClient = mockDiskClient - mockDiskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() instanceID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/%s", nodeName) vm := compute.VirtualMachine{ Name: &nodeName, @@ -842,7 +949,7 @@ func TestControllerPublishVolume(t *testing.T) { mockVMsClient := d.getCloud().VirtualMachinesClient.(*mockvmclient.MockInterface) mockVMsClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(vm, nil).AnyTimes() expectedErr := status.Errorf(codes.Internal, "azureDisk - badmode is not supported cachingmode. 
Supported values are [None ReadOnly ReadWrite]") - _, err := d.ControllerPublishVolume(context.Background(), req) + _, err = d.ControllerPublishVolume(context.Background(), req) if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: ()", err) } @@ -855,7 +962,12 @@ func TestControllerPublishVolume(t *testing.T) { } func TestControllerUnpublishVolume(t *testing.T) { - d, err := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, err := NewFakeDriver(cntl) + if err != nil { + t.Fatalf("Error getting driver: %v", err) + } d.setCloud(&azure.Cloud{}) if err != nil { t.Fatalf("Error getting driver: %v", err) @@ -895,7 +1007,9 @@ func TestControllerUnpublishVolume(t *testing.T) { } func TestControllerGetCapabilities(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) capType := &csi.ControllerServiceCapability_Rpc{ Rpc: &csi.ControllerServiceCapability_RPC{ Type: csi.ControllerServiceCapability_RPC_UNKNOWN, @@ -927,7 +1041,9 @@ func TestControllerExpandVolume(t *testing.T) { req := &csi.ControllerExpandVolumeRequest{} ctx := context.Background() - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) expectedErr := status.Error(codes.InvalidArgument, "Volume ID missing in the request") _, err := d.ControllerExpandVolume(ctx, req) @@ -944,7 +1060,9 @@ func TestControllerExpandVolume(t *testing.T) { } ctx := context.Background() - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) var csc []*csi.ControllerServiceCapability d.setControllerCapabilities(csc) expectedErr := status.Error(codes.InvalidArgument, "invalid expand volume request: volume_id:\"vol_1\" ") @@ -962,7 +1080,9 @@ func TestControllerExpandVolume(t *testing.T) { } ctx := context.Background() - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) expectedErr := status.Error(codes.InvalidArgument, "volume capacity range missing in request") _, err := d.ControllerExpandVolume(ctx, req) @@ -979,7 +1099,9 @@ func TestControllerExpandVolume(t *testing.T) { CapacityRange: stdCapRange, } ctx := context.Background() - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) expectedErr := status.Error(codes.InvalidArgument, "disk URI(httptest) is not valid: invalid DiskURI: httptest, correct format: [/subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}]") _, err := d.ControllerExpandVolume(ctx, req) @@ -997,7 +1119,9 @@ func TestControllerExpandVolume(t *testing.T) { } ctx := context.Background() - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) expectedErr := status.Errorf(codes.InvalidArgument, "disk URI(vol_1) is not valid: invalid DiskURI: vol_1, correct format: [/subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}]") _, err := d.ControllerExpandVolume(ctx, req) @@ -1014,19 +1138,20 @@ func TestControllerExpandVolume(t *testing.T) { CapacityRange: stdCapRange, } id := req.VolumeId - diskProperties := compute.DiskProperties{} - disk := compute.Disk{ - ID: &id, - DiskProperties: &diskProperties, + diskProperties := armcompute.DiskProperties{} + disk := &armcompute.Disk{ + ID: &id, + Properties: &diskProperties, } ctx := context.Background() - 
d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) ctrl := gomock.NewController(t) defer ctrl.Finish() - mockDiskClient := mockdiskclient.NewMockInterface(ctrl) - d.setCloud(&azure.Cloud{}) - d.getCloud().DisksClient = mockDiskClient - mockDiskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() expectedErr := status.Errorf(codes.Internal, "could not get size of the disk(unit-test-volume)") _, err := d.ControllerExpandVolume(ctx, req) if !reflect.DeepEqual(err, expectedErr) { @@ -1048,7 +1173,9 @@ func TestCreateSnapshot(t *testing.T) { { name: "Source volume ID missing", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) req := &csi.CreateSnapshotRequest{} _, err := d.CreateSnapshot(context.Background(), req) expectedErr := status.Error(codes.InvalidArgument, "CreateSnapshot Source Volume ID must be provided") @@ -1060,7 +1187,9 @@ func TestCreateSnapshot(t *testing.T) { { name: "Snapshot name missing", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) req := &csi.CreateSnapshotRequest{ SourceVolumeId: "vol_1"} _, err := d.CreateSnapshot(context.Background(), req) @@ -1073,7 +1202,9 @@ func TestCreateSnapshot(t *testing.T) { { name: "Invalid parameter option", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) parameter := make(map[string]string) parameter["unit-test"] = "test" req := &csi.CreateSnapshotRequest{ @@ -1092,7 +1223,9 @@ func TestCreateSnapshot(t *testing.T) { { name: "Invalid volume ID", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) req := &csi.CreateSnapshotRequest{ SourceVolumeId: "vol_1", Name: "snapname", @@ -1107,7 +1240,9 @@ func TestCreateSnapshot(t *testing.T) { { name: "Invalid tag ", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) d.setCloud(&azure.Cloud{}) parameter := make(map[string]string) parameter["tags"] = "unit-test" @@ -1133,19 +1268,18 @@ func TestCreateSnapshot(t *testing.T) { SourceVolumeId: testVolumeID, Name: "snapname", } - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) d.setCloud(&azure.Cloud{}) ctrl := gomock.NewController(t) defer ctrl.Finish() - mockSnapshotClient := mocksnapshotclient.NewMockInterface(ctrl) - d.getCloud().SnapshotsClient = mockSnapshotClient - rerr := &retry.Error{ - RawError: fmt.Errorf("test"), - } - mockSnapshotClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(rerr).AnyTimes() + mockSnapshotClient := mock_snapshotclient.NewMockInterface(ctrl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetSnapshotClientForSub(gomock.Any()).Return(mockSnapshotClient, nil).AnyTimes() + mockSnapshotClient.EXPECT().CreateOrUpdate(gomock.Any(), 
gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("test")).AnyTimes() _, err := d.CreateSnapshot(context.Background(), req) - expectedErr := status.Errorf(codes.Internal, "create snapshot error: Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: test") + expectedErr := status.Errorf(codes.Internal, "create snapshot error: test") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) } @@ -1161,18 +1295,17 @@ func TestCreateSnapshot(t *testing.T) { Name: "snapname", Parameters: parameter, } - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) d.setCloud(&azure.Cloud{}) ctrl := gomock.NewController(t) defer ctrl.Finish() - mockSnapshotClient := mocksnapshotclient.NewMockInterface(ctrl) - d.getCloud().SnapshotsClient = mockSnapshotClient - rerr := &retry.Error{ - RawError: fmt.Errorf("existing disk"), - } - mockSnapshotClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(rerr).AnyTimes() + mockSnapshotClient := mock_snapshotclient.NewMockInterface(ctrl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetSnapshotClientForSub(gomock.Any()).Return(mockSnapshotClient, nil).AnyTimes() + mockSnapshotClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("existing disk")).AnyTimes() _, err := d.CreateSnapshot(context.Background(), req) - expectedErr := status.Errorf(codes.AlreadyExists, "request snapshot(snapname) under rg(rg) already exists, but the SourceVolumeId is different, error details: Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: existing disk") + expectedErr := status.Errorf(codes.AlreadyExists, "request snapshot(snapname) under rg(rg) already exists, but the SourceVolumeId is different, error details: existing disk") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) } @@ -1188,20 +1321,20 @@ func TestCreateSnapshot(t *testing.T) { Name: "unit-test", Parameters: parameter, } - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) d.setCloud(&azure.Cloud{}) ctrl := gomock.NewController(t) defer ctrl.Finish() - mockSnapshotClient := mocksnapshotclient.NewMockInterface(ctrl) - d.getCloud().SnapshotsClient = mockSnapshotClient - rerr := &retry.Error{ - RawError: fmt.Errorf("get snapshot error"), - } - snapshot := compute.Snapshot{} - mockSnapshotClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - mockSnapshotClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(snapshot, rerr).AnyTimes() + mockSnapshotClient := mock_snapshotclient.NewMockInterface(ctrl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetSnapshotClientForSub(gomock.Any()).Return(mockSnapshotClient, nil).AnyTimes() + + snapshot := &armcompute.Snapshot{} + mockSnapshotClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + mockSnapshotClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(snapshot, fmt.Errorf("get snapshot error")).AnyTimes() _, err := d.CreateSnapshot(context.Background(), req) - expectedErr := status.Errorf(codes.Internal, "get snapshot unit-test from rg(rg) error: Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: 
get snapshot error") + expectedErr := status.Errorf(codes.Internal, "waitForSnapshotReady(, rg, unit-test) failed with get snapshot error") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) } @@ -1217,32 +1350,34 @@ func TestCreateSnapshot(t *testing.T) { Name: "testurl/subscriptions/23/providers/Microsoft.Compute/snapshots/snapshot-name", Parameters: parameter, } - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) d.setCloud(&azure.Cloud{}) ctrl := gomock.NewController(t) defer ctrl.Finish() - mockSnapshotClient := mocksnapshotclient.NewMockInterface(ctrl) - d.getCloud().SnapshotsClient = mockSnapshotClient + mockSnapshotClient := mock_snapshotclient.NewMockInterface(ctrl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetSnapshotClientForSub(gomock.Any()).Return(mockSnapshotClient, nil).AnyTimes() provisioningState := "succeeded" DiskSize := int32(10) snapshotID := "test" - snapshot := compute.Snapshot{ - SnapshotProperties: &compute.SnapshotProperties{ - TimeCreated: &date.Time{}, + snapshot := &armcompute.Snapshot{ + Properties: &armcompute.SnapshotProperties{ + TimeCreated: &time.Time{}, ProvisioningState: &provisioningState, DiskSizeGB: &DiskSize, }, ID: &snapshotID, } - mockSnapshotClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - mockSnapshotClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(snapshot, nil).AnyTimes() + mockSnapshotClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + mockSnapshotClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(snapshot, nil).AnyTimes() actualresponse, err := d.CreateSnapshot(context.Background(), req) - tp := timestamppb.New(snapshot.SnapshotProperties.TimeCreated.ToTime()) + tp := timestamppb.New(*snapshot.Properties.TimeCreated) ready := true expectedresponse := &csi.CreateSnapshotResponse{ Snapshot: &csi.Snapshot{ - SizeBytes: volumehelper.GiBToBytes(int64(*snapshot.SnapshotProperties.DiskSizeGB)), + SizeBytes: volumehelper.GiBToBytes(int64(*snapshot.Properties.DiskSizeGB)), SnapshotId: *snapshot.ID, SourceVolumeId: req.SourceVolumeId, CreationTime: tp, @@ -1272,7 +1407,9 @@ func TestDeleteSnapshot(t *testing.T) { testFunc: func(t *testing.T) { req := &csi.DeleteSnapshotRequest{} expectedErr := status.Error(codes.InvalidArgument, "Snapshot ID must be provided") - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) _, err := d.DeleteSnapshot(context.Background(), req) if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) @@ -1285,7 +1422,9 @@ func TestDeleteSnapshot(t *testing.T) { req := &csi.DeleteSnapshotRequest{ SnapshotId: "/subscriptions/23/providers/Microsoft.Compute/snapshots/snapshot-name", } - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) expectedErr := status.Errorf(codes.Internal, "could not get snapshot name from /subscriptions/23/providers/Microsoft.Compute/snapshots/snapshot-name, correct format: (?i).*/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/snapshots/(.+)") _, err := d.DeleteSnapshot(context.Background(), req) if !reflect.DeepEqual(err, expectedErr) { @@ -1296,20 +1435,19 @@ func TestDeleteSnapshot(t 
*testing.T) { { name: "delete Snapshot error", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) d.setCloud(&azure.Cloud{}) ctrl := gomock.NewController(t) defer ctrl.Finish() - mockSnapshotClient := mocksnapshotclient.NewMockInterface(ctrl) - d.getCloud().SnapshotsClient = mockSnapshotClient + mockSnapshotClient := mock_snapshotclient.NewMockInterface(ctrl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetSnapshotClientForSub(gomock.Any()).Return(mockSnapshotClient, nil).AnyTimes() req := &csi.DeleteSnapshotRequest{ SnapshotId: "testurl/subscriptions/12/resourceGroups/23/providers/Microsoft.Compute/snapshots/snapshot-name", } - rerr := &retry.Error{ - RawError: fmt.Errorf("get snapshot error"), - } - mockSnapshotClient.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(rerr).AnyTimes() - expectedErr := status.Errorf(codes.Internal, "delete snapshot error: Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: get snapshot error") + mockSnapshotClient.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("get snapshot error")).AnyTimes() + expectedErr := status.Errorf(codes.Internal, "delete snapshot error: get snapshot error") _, err := d.DeleteSnapshot(context.Background(), req) if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) @@ -1319,16 +1457,18 @@ func TestDeleteSnapshot(t *testing.T) { { name: "Valid delete Snapshot ", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) d.setCloud(&azure.Cloud{}) ctrl := gomock.NewController(t) defer ctrl.Finish() - mockSnapshotClient := mocksnapshotclient.NewMockInterface(ctrl) - d.getCloud().SnapshotsClient = mockSnapshotClient + mockSnapshotClient := mock_snapshotclient.NewMockInterface(ctrl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetSnapshotClientForSub(gomock.Any()).Return(mockSnapshotClient, nil).AnyTimes() req := &csi.DeleteSnapshotRequest{ SnapshotId: "testurl/subscriptions/12/resourceGroups/23/providers/Microsoft.Compute/snapshots/snapshot-name", } - mockSnapshotClient.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + mockSnapshotClient.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() _, err := d.DeleteSnapshot(context.Background(), req) if !reflect.DeepEqual(err, nil) { t.Errorf("actualErr: (%v), expectedErr: nil)", err) @@ -1351,7 +1491,9 @@ func TestGetSnapshotByID(t *testing.T) { testFunc: func(t *testing.T) { sourceVolumeID := "unit-test" ctx := context.Background() - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) d.setCloud(&azure.Cloud{}) snapshotID := "testurl/subscriptions/23/providers/Microsoft.Compute/snapshots/snapshot-name" expectedErr := status.Errorf(codes.Internal, "could not get snapshot name from testurl/subscriptions/23/providers/Microsoft.Compute/snapshots/snapshot-name, correct format: (?i).*/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/snapshots/(.+)") @@ -1364,22 +1506,21 @@ func TestGetSnapshotByID(t *testing.T) { { name: "snapshot get error", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) 
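
(Editorial aside, not part of the patch.) Every test touched in this file is moved to the same wiring: the gomock controller is created by the test itself and handed to NewFakeDriver, and Azure access is stubbed by registering a track-2 mock client on the driver's client factory instead of assigning to d.getCloud().DisksClient or SnapshotsClient. A minimal sketch of that pattern as a reusable helper is below; the helper name is hypothetical, and the import paths for gomock and the mock packages are assumptions inferred from the identifiers used in this diff, so they may need adjusting to the actual module layout.

```go
package azuredisk

import (
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"go.uber.org/mock/gomock" // assumption: golang/mock or its go.uber.org/mock fork

	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/mock_diskclient" // assumed path
	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient"              // assumed path
)

// newFakeDriverWithDiskClient (hypothetical helper) wires a fake driver so that any code
// path asking the client factory for a disk client receives the returned mock.
func newFakeDriverWithDiskClient(t *testing.T) (FakeDriver, *mock_diskclient.MockInterface) {
	cntl := gomock.NewController(t)
	t.Cleanup(cntl.Finish)

	d, err := NewFakeDriver(cntl)
	if err != nil {
		t.Fatalf("Error getting driver: %v", err)
	}

	// The track-2 clients are reached through the client factory, so the factory is
	// stubbed first and then the disk client it hands out.
	diskClient := mock_diskclient.NewMockInterface(cntl)
	d.getClientFactory().(*mock_azclient.MockClientFactory).
		EXPECT().
		GetDiskClientForSub(gomock.Any()).
		Return(diskClient, nil).
		AnyTimes()

	// Track-2 Get takes (ctx, resourceGroup, diskName) and returns *armcompute.Disk.
	diskClient.EXPECT().
		Get(gomock.Any(), gomock.Any(), gomock.Any()).
		Return(&armcompute.Disk{}, nil).
		AnyTimes()

	return d, diskClient
}
```

Under those assumptions, a test body reduces to `d, diskClient := newFakeDriverWithDiskClient(t)` followed by per-case expectations, which is the shape the individual test cases in this diff repeat inline.
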
d.setCloud(&azure.Cloud{}) ctrl := gomock.NewController(t) defer ctrl.Finish() - mockSnapshotClient := mocksnapshotclient.NewMockInterface(ctrl) - d.getCloud().SnapshotsClient = mockSnapshotClient - rerr := &retry.Error{ - RawError: fmt.Errorf("test"), - } + mockSnapshotClient := mock_snapshotclient.NewMockInterface(ctrl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetSnapshotClientForSub(gomock.Any()).Return(mockSnapshotClient, nil).AnyTimes() snapshotID := "testurl/subscriptions/23/providers/Microsoft.Compute/snapshots/snapshot-name" - snapshot := compute.Snapshot{ - SnapshotProperties: &compute.SnapshotProperties{}, - ID: &snapshotID, + snapshot := &armcompute.Snapshot{ + Properties: &armcompute.SnapshotProperties{}, + ID: &snapshotID, } snapshotVolumeID := "unit-test" - mockSnapshotClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(snapshot, rerr).AnyTimes() + mockSnapshotClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(snapshot, fmt.Errorf("test")).AnyTimes() expectedErr := status.Errorf(codes.Internal, "could not get snapshot name from testurl/subscriptions/23/providers/Microsoft.Compute/snapshots/snapshot-name, correct format: (?i).*/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/snapshots/(.+)") _, err := d.getSnapshotByID(context.Background(), d.getCloud().SubscriptionID, d.getCloud().ResourceGroup, snapshotID, snapshotVolumeID) if !reflect.DeepEqual(err, expectedErr) { @@ -1404,7 +1545,9 @@ func TestListSnapshots(t *testing.T) { req := csi.ListSnapshotsRequest{ SnapshotId: "testurl/subscriptions/23/providers/Microsoft.Compute/snapshots/snapshot-nametestVolumeName", } - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) expectedErr := status.Errorf(codes.Internal, "could not get snapshot name from testurl/subscriptions/23/providers/Microsoft.Compute/snapshots/snapshot-nametestVolumeName, correct format: (?i).*/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/snapshots/(.+)") _, err := d.ListSnapshots(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1418,13 +1561,15 @@ func TestListSnapshots(t *testing.T) { req := csi.ListSnapshotsRequest{ SnapshotId: "testurl/subscriptions/12/resourceGroups/23/providers/Microsoft.Compute/snapshots/snapshot-name", } - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) provisioningState := "succeeded" DiskSize := int32(10) snapshotID := "test" - snapshot := compute.Snapshot{ - SnapshotProperties: &compute.SnapshotProperties{ - TimeCreated: &date.Time{}, + snapshot := &armcompute.Snapshot{ + Properties: &armcompute.SnapshotProperties{ + TimeCreated: &time.Time{}, ProvisioningState: &provisioningState, DiskSizeGB: &DiskSize, }, @@ -1432,9 +1577,9 @@ func TestListSnapshots(t *testing.T) { } ctrl := gomock.NewController(t) defer ctrl.Finish() - mockSnapshotClient := mocksnapshotclient.NewMockInterface(ctrl) - d.getCloud().SnapshotsClient = mockSnapshotClient - mockSnapshotClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(snapshot, nil).AnyTimes() + mockSnapshotClient := mock_snapshotclient.NewMockInterface(ctrl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetSnapshotClientForSub(gomock.Any()).Return(mockSnapshotClient, nil).AnyTimes() + mockSnapshotClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(snapshot, nil).AnyTimes() 
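
(Editorial aside, not part of the patch.) The new "valid PerformancePlus request" test near the top of this section and the CreateVolume hunk in controllerserver_v2.go near the end both rely on one sizing rule: a PerformancePlus volume is silently grown to the feature's minimum size before the general floor is applied. A standalone sketch of that rule follows; the two constant values are written in as assumptions, since consts.PerformancePlusMinimumDiskSizeGiB and consts.MinimumDiskSizeGiB are not defined anywhere in this diff.

```go
package main

import "fmt"

const (
	assumedPerformancePlusMinimumDiskSizeGiB = 513 // assumption: Performance Plus requires disks larger than 512 GiB
	assumedMinimumDiskSizeGiB                = 1   // assumption: general lower bound applied by the driver
)

// effectiveRequestGiB mirrors the bump-to-minimum logic from the CreateVolume hunk:
// a PerformancePlus request is grown to the feature minimum, then the global floor applies.
func effectiveRequestGiB(requestGiB int, performancePlus *bool) int {
	if performancePlus != nil && *performancePlus && requestGiB < assumedPerformancePlusMinimumDiskSizeGiB {
		requestGiB = assumedPerformancePlusMinimumDiskSizeGiB
	}
	if requestGiB < assumedMinimumDiskSizeGiB {
		requestGiB = assumedMinimumDiskSizeGiB
	}
	return requestGiB
}

func main() {
	pp := true
	// Matches the new unit test: a 10 GiB PerformancePlus request is resized to the minimum.
	fmt.Println(effectiveRequestGiB(10, &pp)) // 513 under the assumed constant
	fmt.Println(effectiveRequestGiB(10, nil)) // 10: no PerformancePlus, floor not reached
}
```
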
expectedErr := error(nil) _, err := d.ListSnapshots(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1446,19 +1591,18 @@ func TestListSnapshots(t *testing.T) { name: "List resource error", testFunc: func(t *testing.T) { req := csi.ListSnapshotsRequest{} - d, _ := NewFakeDriver(t) - snapshot := compute.Snapshot{} - snapshots := []compute.Snapshot{} + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) + snapshot := &armcompute.Snapshot{} + snapshots := []*armcompute.Snapshot{} snapshots = append(snapshots, snapshot) ctrl := gomock.NewController(t) defer ctrl.Finish() - rerr := &retry.Error{ - RawError: fmt.Errorf("test"), - } - mockSnapshotClient := mocksnapshotclient.NewMockInterface(ctrl) - d.getCloud().SnapshotsClient = mockSnapshotClient - mockSnapshotClient.EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return(snapshots, rerr).AnyTimes() - expectedErr := status.Error(codes.Internal, "Unknown list snapshot error: Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: test") + mockSnapshotClient := mock_snapshotclient.NewMockInterface(ctrl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetSnapshotClient().Return(mockSnapshotClient).AnyTimes() + mockSnapshotClient.EXPECT().List(gomock.Any(), gomock.Any()).Return(snapshots, fmt.Errorf("test")).AnyTimes() + expectedErr := status.Error(codes.Internal, "Unknown list snapshot error: test") _, err := d.ListSnapshots(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) @@ -1469,15 +1613,17 @@ func TestListSnapshots(t *testing.T) { name: "snapshot property nil", testFunc: func(t *testing.T) { req := csi.ListSnapshotsRequest{} - d, _ := NewFakeDriver(t) - snapshot := compute.Snapshot{} - snapshots := []compute.Snapshot{} + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) + snapshot := &armcompute.Snapshot{} + snapshots := []*armcompute.Snapshot{} snapshots = append(snapshots, snapshot) ctrl := gomock.NewController(t) defer ctrl.Finish() - mockSnapshotClient := mocksnapshotclient.NewMockInterface(ctrl) - d.getCloud().SnapshotsClient = mockSnapshotClient - mockSnapshotClient.EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return(snapshots, nil).AnyTimes() + mockSnapshotClient := mock_snapshotclient.NewMockInterface(ctrl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetSnapshotClient().Return(mockSnapshotClient).AnyTimes() + mockSnapshotClient.EXPECT().List(gomock.Any(), gomock.Any()).Return(snapshots, nil).AnyTimes() expectedErr := fmt.Errorf("failed to generate snapshot entry: snapshot property is nil") _, err := d.ListSnapshots(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1489,27 +1635,29 @@ func TestListSnapshots(t *testing.T) { name: "List snapshots when source volumeId is given", testFunc: func(t *testing.T) { req := csi.ListSnapshotsRequest{SourceVolumeId: "test"} - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) volumeID := "test" DiskSize := int32(10) snapshotID := "test" provisioningState := "succeeded" - snapshot1 := compute.Snapshot{ - SnapshotProperties: &compute.SnapshotProperties{ - TimeCreated: &date.Time{}, + snapshot1 := &armcompute.Snapshot{ + Properties: &armcompute.SnapshotProperties{ + TimeCreated: &time.Time{}, ProvisioningState: &provisioningState, DiskSizeGB: &DiskSize, - CreationData: 
&compute.CreationData{SourceResourceID: &volumeID}, + CreationData: &armcompute.CreationData{SourceResourceID: &volumeID}, }, ID: &snapshotID} - snapshot2 := compute.Snapshot{} - snapshots := []compute.Snapshot{} + snapshot2 := &armcompute.Snapshot{} + snapshots := []*armcompute.Snapshot{} snapshots = append(snapshots, snapshot1, snapshot2) ctrl := gomock.NewController(t) defer ctrl.Finish() - mockSnapshotClient := mocksnapshotclient.NewMockInterface(ctrl) - d.getCloud().SnapshotsClient = mockSnapshotClient - mockSnapshotClient.EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return(snapshots, nil).AnyTimes() + mockSnapshotClient := mock_snapshotclient.NewMockInterface(ctrl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetSnapshotClient().Return(mockSnapshotClient).AnyTimes() + mockSnapshotClient.EXPECT().List(gomock.Any(), gomock.Any()).Return(snapshots, nil).AnyTimes() snapshotsResponse, _ := d.ListSnapshots(context.TODO(), &req) if len(snapshotsResponse.Entries) != 1 { t.Errorf("actualNumberOfEntries: (%v), expectedNumberOfEntries: (%v)", len(snapshotsResponse.Entries), 1) @@ -1530,7 +1678,9 @@ func TestListSnapshots(t *testing.T) { } func TestGetCapacity(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) req := csi.GetCapacityRequest{} resp, err := d.GetCapacity(context.Background(), &req) assert.Nil(t, resp) @@ -1569,12 +1719,16 @@ func TestListVolumes(t *testing.T) { name: "When no KubeClient exists, Valid list without max_entries or starting_token", testFunc: func(t *testing.T) { req := csi.ListVolumesRequest{} - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) fakeVolumeID := "test" - disk := compute.Disk{ID: &fakeVolumeID} - disks := []compute.Disk{} + disk := &armcompute.Disk{ID: &fakeVolumeID} + disks := []*armcompute.Disk{} disks = append(disks, disk) - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes() expectedErr := error(nil) listVolumesResponse, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1591,12 +1745,16 @@ func TestListVolumes(t *testing.T) { req := csi.ListVolumesRequest{ MaxEntries: 1, } - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) fakeVolumeID := "test" - disk1, disk2 := compute.Disk{ID: &fakeVolumeID}, compute.Disk{ID: &fakeVolumeID} - disks := []compute.Disk{} + disk1, disk2 := &armcompute.Disk{ID: &fakeVolumeID}, &armcompute.Disk{ID: &fakeVolumeID} + disks := []*armcompute.Disk{} disks = append(disks, disk1, disk2) - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes() expectedErr := error(nil) listVolumesResponse, err := d.ListVolumes(context.TODO(), &req) if 
!reflect.DeepEqual(err, expectedErr) { @@ -1617,12 +1775,16 @@ func TestListVolumes(t *testing.T) { StartingToken: "1", MaxEntries: 1, } - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) fakeVolumeID1, fakeVolumeID12 := "test1", "test2" - disk1, disk2 := compute.Disk{ID: &fakeVolumeID1}, compute.Disk{ID: &fakeVolumeID12} - disks := []compute.Disk{} + disk1, disk2 := &armcompute.Disk{ID: &fakeVolumeID1}, &armcompute.Disk{ID: &fakeVolumeID12} + disks := []*armcompute.Disk{} disks = append(disks, disk1, disk2) - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes() expectedErr := error(nil) listVolumesResponse, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1645,9 +1807,13 @@ func TestListVolumes(t *testing.T) { req := csi.ListVolumesRequest{ StartingToken: "1", } - d, _ := NewFakeDriver(t) - disks := []compute.Disk{} - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes() + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) + disks := []*armcompute.Disk{} + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes() expectedErr := status.Error(codes.FailedPrecondition, "ListVolumes starting token(1) on rg(rg) is greater than total number of volumes") _, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1661,13 +1827,14 @@ func TestListVolumes(t *testing.T) { req := csi.ListVolumesRequest{ StartingToken: "1", } - d, _ := NewFakeDriver(t) - disks := []compute.Disk{} - rerr := &retry.Error{ - RawError: fmt.Errorf("test"), - } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return(disks, rerr).AnyTimes() - expectedErr := status.Error(codes.Internal, "ListVolumes on rg(rg) failed with error: Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: test") + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) + disks := []*armcompute.Disk{} + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return(disks, fmt.Errorf("test")).AnyTimes() + expectedErr := status.Error(codes.Internal, "ListVolumes on rg(rg) failed with error: test") _, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) @@ -1678,12 +1845,16 @@ func TestListVolumes(t *testing.T) { name: "When KubeClient exists, Empty list without start token should not return error", testFunc: func(t *testing.T) { req := csi.ListVolumesRequest{} - d := getFakeDriverWithKubeClient(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d := 
getFakeDriverWithKubeClient(cntl) pvList := v1.PersistentVolumeList{ Items: []v1.PersistentVolume{}, } d.getCloud().KubeClient.CoreV1().PersistentVolumes().(*mockpersistentvolume.MockInterface).EXPECT().List(gomock.Any(), gomock.Any()).Return(&pvList, nil) - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return([]compute.Disk{}, nil) + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return([]*armcompute.Disk{}, nil).AnyTimes() expectedErr := error(nil) _, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1696,13 +1867,17 @@ func TestListVolumes(t *testing.T) { testFunc: func(t *testing.T) { req := csi.ListVolumesRequest{} fakeVolumeID := "/subscriptions/test-subscription/resourceGroups/test_resourcegroup-1/providers/Microsoft.Compute/disks/test-pv-1" - d := getFakeDriverWithKubeClient(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d := getFakeDriverWithKubeClient(cntl) pvList := v1.PersistentVolumeList{ Items: []v1.PersistentVolume{volume1}, } d.getCloud().KubeClient.CoreV1().PersistentVolumes().(*mockpersistentvolume.MockInterface).EXPECT().List(gomock.Any(), gomock.Any()).Return(&pvList, nil) - disk1 := compute.Disk{ID: &fakeVolumeID} - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return([]compute.Disk{disk1}, nil) + disk1 := &armcompute.Disk{ID: &fakeVolumeID} + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return([]*armcompute.Disk{disk1}, nil).AnyTimes() expectedErr := error(nil) listVolumesResponse, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1719,16 +1894,20 @@ func TestListVolumes(t *testing.T) { req := csi.ListVolumesRequest{ MaxEntries: 1, } - d := getFakeDriverWithKubeClient(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d := getFakeDriverWithKubeClient(cntl) d.getCloud().SubscriptionID = "test-subscription" fakeVolumeID := "/subscriptions/test-subscription/resourceGroups/test_resourcegroup-1/providers/Microsoft.Compute/disks/test-pv-1" - disk1, disk2 := compute.Disk{ID: &fakeVolumeID}, compute.Disk{ID: &fakeVolumeID} + disk1, disk2 := &armcompute.Disk{ID: &fakeVolumeID}, &armcompute.Disk{ID: &fakeVolumeID} pvList := v1.PersistentVolumeList{ Items: []v1.PersistentVolume{volume1, volume2}, } d.getCloud().KubeClient.CoreV1().PersistentVolumes().(*mockpersistentvolume.MockInterface).EXPECT().List(gomock.Any(), gomock.Any()).Return(&pvList, nil) - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return([]compute.Disk{disk1}, nil) - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return([]compute.Disk{disk2}, nil) + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return([]*armcompute.Disk{disk1}, nil).AnyTimes() + 
diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return([]*armcompute.Disk{disk2}, nil).AnyTimes() expectedErr := error(nil) listVolumesResponse, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1749,16 +1928,20 @@ func TestListVolumes(t *testing.T) { StartingToken: "1", MaxEntries: 1, } - d := getFakeDriverWithKubeClient(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d := getFakeDriverWithKubeClient(cntl) d.getCloud().SubscriptionID = "test-subscription" pvList := v1.PersistentVolumeList{ Items: []v1.PersistentVolume{volume1, volume2}, } fakeVolumeID11, fakeVolumeID12 := "/subscriptions/test-subscription/resourceGroups/test_resourcegroup-1/providers/Microsoft.Compute/disks/test-pv-1", "/subscriptions/test-subscription/resourceGroups/test_resourcegroup-2/providers/Microsoft.Compute/disks/test-pv-2" - disk1, disk2 := compute.Disk{ID: &fakeVolumeID11}, compute.Disk{ID: &fakeVolumeID12} + disk1, disk2 := &armcompute.Disk{ID: &fakeVolumeID11}, &armcompute.Disk{ID: &fakeVolumeID12} d.getCloud().KubeClient.CoreV1().PersistentVolumes().(*mockpersistentvolume.MockInterface).EXPECT().List(gomock.Any(), gomock.Any()).Return(&pvList, nil) - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return([]compute.Disk{disk1}, nil) - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return([]compute.Disk{disk2}, nil) + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return([]*armcompute.Disk{disk1}, nil).AnyTimes() + diskClient.EXPECT().List(gomock.Any(), gomock.Any()).Return([]*armcompute.Disk{disk2}, nil).AnyTimes() expectedErr := error(nil) listVolumesResponse, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1770,8 +1953,8 @@ func TestListVolumes(t *testing.T) { if listVolumesResponse.NextToken != "" { t.Errorf("actualNextToken: (%v), expectedNextToken: (%v)", listVolumesResponse.NextToken, "") } - if listVolumesResponse.Entries[0].Volume.VolumeId != fakeVolumeID12 { - t.Errorf("actualVolumeId: (%v), expectedVolumeId: (%v)", listVolumesResponse.Entries[0].Volume.VolumeId, fakeVolumeID12) + if listVolumesResponse.Entries[0].Volume.VolumeId != fakeVolumeID11 { + t.Errorf("actualVolumeId: (%v), expectedVolumeId: (%v)", listVolumesResponse.Entries[0].Volume.VolumeId, fakeVolumeID11) } }, }, @@ -1781,12 +1964,16 @@ func TestListVolumes(t *testing.T) { req := csi.ListVolumesRequest{ StartingToken: "1", } - d := getFakeDriverWithKubeClient(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d := getFakeDriverWithKubeClient(cntl) pvList := v1.PersistentVolumeList{ Items: []v1.PersistentVolume{}, } d.getCloud().KubeClient.CoreV1().PersistentVolumes().(*mockpersistentvolume.MockInterface).EXPECT().List(gomock.Any(), gomock.Any()).Return(&pvList, nil) expectedErr := status.Error(codes.FailedPrecondition, "ListVolumes starting token(1) is greater than total number of disks") + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() _, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, 
expectedErr) @@ -1799,10 +1986,14 @@ func TestListVolumes(t *testing.T) { req := csi.ListVolumesRequest{ StartingToken: "1", } - d := getFakeDriverWithKubeClient(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d := getFakeDriverWithKubeClient(cntl) rerr := fmt.Errorf("test") d.getCloud().KubeClient.CoreV1().PersistentVolumes().(*mockpersistentvolume.MockInterface).EXPECT().List(gomock.Any(), gomock.Any()).Return(nil, rerr) expectedErr := status.Error(codes.Internal, "ListVolumes failed while fetching PersistentVolumes List with error: test") + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClient().Return(diskClient).AnyTimes() _, err := d.ListVolumes(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) @@ -1825,7 +2016,9 @@ func TestValidateVolumeCapabilities(t *testing.T) { name: "Volume ID missing ", testFunc: func(t *testing.T) { req := csi.ValidateVolumeCapabilitiesRequest{} - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) expectedErr := status.Errorf(codes.InvalidArgument, "Volume ID missing in the request") _, err := d.ValidateVolumeCapabilities(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1839,7 +2032,9 @@ func TestValidateVolumeCapabilities(t *testing.T) { req := csi.ValidateVolumeCapabilitiesRequest{ VolumeId: "unit-test", } - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) expectedErr := status.Errorf(codes.InvalidArgument, "VolumeCapabilities missing in the request") _, err := d.ValidateVolumeCapabilities(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1854,7 +2049,9 @@ func TestValidateVolumeCapabilities(t *testing.T) { VolumeId: "-", VolumeCapabilities: stdVolumeCapabilities, } - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) expectedErr := status.Errorf(codes.NotFound, "Volume not found, failed with error: could not get disk name from -, correct format: (?i).*/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/disks/(.+)") _, err := d.ValidateVolumeCapabilities(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1869,11 +2066,15 @@ func TestValidateVolumeCapabilities(t *testing.T) { VolumeId: testVolumeID, VolumeCapabilities: stdVolumeCapabilities, } - d, _ := NewFakeDriver(t) - disk := compute.Disk{ - DiskProperties: &compute.DiskProperties{}, + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) + disk := &armcompute.Disk{ + Properties: &armcompute.DiskProperties{}, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() expectedErr := error(nil) _, err := d.ValidateVolumeCapabilities(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1896,11 +2097,15 @@ func TestValidateVolumeCapabilities(t *testing.T) { VolumeId: testVolumeID, VolumeCapabilities: stdVolumeCapabilitiestest, } - d, _ := NewFakeDriver(t) - disk 
:= compute.Disk{ - DiskProperties: &compute.DiskProperties{}, + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) + disk := &armcompute.Disk{ + Properties: &armcompute.DiskProperties{}, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() expectedErr := error(nil) _, err := d.ValidateVolumeCapabilities(context.TODO(), &req) if !reflect.DeepEqual(err, expectedErr) { @@ -1922,8 +2127,10 @@ func TestGetSourceDiskSize(t *testing.T) { { name: "max depth reached", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) - _, err := d.GetSourceDiskSize(context.Background(), "", "test-rg", "test-disk", 2, 1) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) + _, _, err := d.GetSourceDiskSize(context.Background(), "", "test-rg", "test-disk", 2, 1) expectedErr := status.Errorf(codes.Internal, "current depth (2) surpassed the max depth (1) while searching for the source disk size") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) @@ -1933,10 +2140,15 @@ func TestGetSourceDiskSize(t *testing.T) { { name: "diskproperty not found", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) - disk := compute.Disk{} - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() - _, err := d.GetSourceDiskSize(context.Background(), "", "test-rg", "test-disk", 0, 1) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) + disk := &armcompute.Disk{} + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + + _, _, err := d.GetSourceDiskSize(context.Background(), "", "test-rg", "test-disk", 0, 1) expectedErr := status.Error(codes.Internal, "DiskProperty not found for disk (test-disk) in resource group (test-rg)") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) @@ -1946,13 +2158,17 @@ func TestGetSourceDiskSize(t *testing.T) { { name: "nil DiskSizeGB", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) - diskProperties := compute.DiskProperties{} - disk := compute.Disk{ - DiskProperties: &diskProperties, - } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() - _, err := d.GetSourceDiskSize(context.Background(), "", "test-rg", "test-disk", 0, 1) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) + diskProperties := armcompute.DiskProperties{} + disk := &armcompute.Disk{ + Properties: &diskProperties, + } + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), 
gomock.Any()).Return(disk, nil).AnyTimes() + _, _, err := d.GetSourceDiskSize(context.Background(), "", "test-rg", "test-disk", 0, 1) expectedErr := status.Error(codes.Internal, "DiskSizeGB for disk (test-disk) in resourcegroup (test-rg) is nil") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) @@ -1962,16 +2178,20 @@ func TestGetSourceDiskSize(t *testing.T) { { name: "successful search: depth 1", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) diskSizeGB := int32(8) - diskProperties := compute.DiskProperties{ + diskProperties := armcompute.DiskProperties{ DiskSizeGB: &diskSizeGB, } - disk := compute.Disk{ - DiskProperties: &diskProperties, + disk := &armcompute.Disk{ + Properties: &diskProperties, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() - size, _ := d.GetSourceDiskSize(context.Background(), "", "test-rg", "test-disk", 0, 1) + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes() + size, _, _ := d.GetSourceDiskSize(context.Background(), "", "test-rg", "test-disk", 0, 1) expectedOutput := diskSizeGB if *size != expectedOutput { t.Errorf("actualOutput: (%v), expectedOutput: (%v)", *size, expectedOutput) @@ -1981,29 +2201,33 @@ func TestGetSourceDiskSize(t *testing.T) { { name: "successful search: depth 2", testFunc: func(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) diskSizeGB1 := int32(16) diskSizeGB2 := int32(8) sourceURI := "/subscriptions/xxxxxxxx/resourcegroups/test-rg/providers/microsoft.compute/disks/test-disk-1" - creationData := compute.CreationData{ - CreateOption: "Copy", + creationData := armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceURI: &sourceURI, } - diskProperties1 := compute.DiskProperties{ + diskProperties1 := armcompute.DiskProperties{ CreationData: &creationData, DiskSizeGB: &diskSizeGB1, } - diskProperties2 := compute.DiskProperties{ + diskProperties2 := armcompute.DiskProperties{ DiskSizeGB: &diskSizeGB2, } - disk1 := compute.Disk{ - DiskProperties: &diskProperties1, + disk1 := &armcompute.Disk{ + Properties: &diskProperties1, } - disk2 := compute.Disk{ - DiskProperties: &diskProperties2, + disk2 := &armcompute.Disk{ + Properties: &diskProperties2, } - d.getCloud().DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(disk1, nil).Return(disk2, nil).AnyTimes() - size, _ := d.GetSourceDiskSize(context.Background(), "", "test-rg", "test-disk-1", 0, 2) + diskClient := mock_diskclient.NewMockInterface(cntl) + d.getClientFactory().(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(gomock.Any()).Return(diskClient, nil).AnyTimes() + diskClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk1, nil).Return(disk2, nil).AnyTimes() + size, _, _ := d.GetSourceDiskSize(context.Background(), "", "test-rg", "test-disk-1", 0, 2) expectedOutput := diskSizeGB2 if *size != expectedOutput { t.Errorf("actualOutput: (%v), expectedOutput: (%v)", *size, expectedOutput) @@ -2016,10 
+2240,10 @@ func TestGetSourceDiskSize(t *testing.T) { } } -func getFakeDriverWithKubeClient(t *testing.T) FakeDriver { - d, _ := NewFakeDriver(t) - ctrl := gomock.NewController(t) - defer ctrl.Finish() +func getFakeDriverWithKubeClient(ctrl *gomock.Controller) FakeDriver { + + d, _ := NewFakeDriver(ctrl) + corev1 := mockcorev1.NewMockInterface(ctrl) persistentvolume := mockpersistentvolume.NewMockInterface(ctrl) d.getCloud().KubeClient = mockkubeclient.NewMockInterface(ctrl) @@ -2027,42 +2251,3 @@ func getFakeDriverWithKubeClient(t *testing.T) FakeDriver { d.getCloud().KubeClient.CoreV1().(*mockcorev1.MockInterface).EXPECT().PersistentVolumes().Return(persistentvolume).AnyTimes() return d } - -func TestIsAsyncAttachEnabled(t *testing.T) { - tests := []struct { - name string - defaultValue bool - volumeContext map[string]string - expected bool - }{ - { - name: "nil volumeContext", - defaultValue: false, - expected: false, - }, - { - name: "empty volumeContext", - defaultValue: true, - volumeContext: map[string]string{}, - expected: true, - }, - { - name: "false value in volumeContext", - defaultValue: true, - volumeContext: map[string]string{"enableAsyncAttach": "false"}, - expected: false, - }, - { - name: "trie value in volumeContext", - defaultValue: false, - volumeContext: map[string]string{"enableasyncattach": "true"}, - expected: true, - }, - } - for _, test := range tests { - result := isAsyncAttachEnabled(test.defaultValue, test.volumeContext) - if result != test.expected { - t.Errorf("test(%s): result(%v) != expected result(%v)", test.name, result, test.expected) - } - } -} diff --git a/pkg/azuredisk/controllerserver_v2.go b/pkg/azuredisk/controllerserver_v2.go index 3f87ec594..862560ba8 100644 --- a/pkg/azuredisk/controllerserver_v2.go +++ b/pkg/azuredisk/controllerserver_v2.go @@ -27,7 +27,8 @@ import ( "strconv" "strings" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/container-storage-interface/spec/lib/go/csi" "google.golang.org/grpc/codes" @@ -70,8 +71,8 @@ func (d *DriverV2) CreateVolume(ctx context.Context, req *csi.CreateVolumeReques return nil, status.Error(codes.InvalidArgument, "CreateVolume Volume capabilities must be provided") } - if !azureutils.IsValidVolumeCapabilities(volCaps, diskParams.MaxShares) { - return nil, status.Error(codes.InvalidArgument, "Volume capability not supported") + if err := azureutils.IsValidVolumeCapabilities(volCaps, diskParams.MaxShares); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) } isAdvancedPerfProfile := strings.EqualFold(diskParams.PerfProfile, consts.PerfProfileAdvanced) // If perfProfile is set to advanced and no/invalid device settings are provided, fail the request @@ -89,7 +90,13 @@ func (d *DriverV2) CreateVolume(ctx context.Context, req *csi.CreateVolumeReques capacityBytes := req.GetCapacityRange().GetRequiredBytes() volSizeBytes := int64(capacityBytes) requestGiB := int(volumehelper.RoundUpGiB(volSizeBytes)) + + if diskParams.PerformancePlus != nil && *diskParams.PerformancePlus && requestGiB < consts.PerformancePlusMinimumDiskSizeGiB { + klog.Warningf("using PerformancePlus, increasing requested disk size from %vGiB to %vGiB (minimal size for PerformancePlus feature)", requestGiB, consts.PerformancePlusMinimumDiskSizeGiB) + requestGiB = consts.PerformancePlusMinimumDiskSizeGiB + } if requestGiB < 
consts.MinimumDiskSizeGiB { + klog.Infof("increasing requested disk size from %vGiB to %vGiB (minimal disk size)", requestGiB, consts.MinimumDiskSizeGiB) requestGiB = consts.MinimumDiskSizeGiB } @@ -122,7 +129,7 @@ func (d *DriverV2) CreateVolume(ctx context.Context, req *csi.CreateVolumeReques if _, err := azureutils.NormalizeCachingMode(diskParams.CachingMode); err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } - if skuName == compute.PremiumV2LRS { + if skuName == armcompute.DiskStorageAccountTypesPremiumV2LRS { // PremiumV2LRS only supports None caching mode azureutils.SetKeyValueInMap(diskParams.VolumeContext, consts.CachingModeField, string(v1.AzureDataDiskCachingNone)) } @@ -183,14 +190,14 @@ func (d *DriverV2) CreateVolume(ctx context.Context, req *csi.CreateVolumeReques } subsID := azureutils.GetSubscriptionIDFromURI(sourceID) - if sourceGiB, _ := d.GetSourceDiskSize(ctx, subsID, diskParams.ResourceGroup, path.Base(sourceID), 0, consts.SourceDiskSearchMaxDepth); sourceGiB != nil && *sourceGiB < int32(requestGiB) { + if sourceGiB, _, _ := d.GetSourceDiskSize(ctx, subsID, diskParams.ResourceGroup, path.Base(sourceID), 0, consts.SourceDiskSearchMaxDepth); sourceGiB != nil && *sourceGiB < int32(requestGiB) { diskParams.VolumeContext[consts.ResizeRequired] = strconv.FormatBool(true) } } } diskParams.VolumeContext[consts.RequestedSizeGib] = strconv.Itoa(requestGiB) - volumeOptions := &azure.ManagedDiskOptions{ + volumeOptions := &ManagedDiskOptions{ AvailabilityZone: selectedAvailabilityZone, BurstingEnabled: diskParams.EnableBursting, DiskEncryptionSetID: diskParams.DiskEncryptionSetID, @@ -225,7 +232,7 @@ func (d *DriverV2) CreateVolume(ctx context.Context, req *csi.CreateVolumeReques mc.ObserveOperationWithResult(isOperationSucceeded, consts.VolumeID, diskURI) }() - diskURI, err = d.cloud.CreateManagedDisk(ctx, volumeOptions) + diskURI, err = d.diskController.CreateManagedDisk(ctx, volumeOptions) if err != nil { if strings.Contains(err.Error(), consts.NotFound) { return nil, status.Error(codes.NotFound, err.Error()) @@ -280,7 +287,7 @@ func (d *DriverV2) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeReques }() klog.V(2).Infof("deleting azure disk(%s)", diskURI) - err := d.cloud.DeleteManagedDisk(ctx, diskURI) + err := d.diskController.DeleteManagedDisk(ctx, diskURI) klog.V(2).Infof("delete azure disk(%s) returned with %v", diskURI, err) isOperationSucceeded = (err == nil) return &csi.DeleteVolumeResponse{}, err @@ -291,6 +298,11 @@ func (d *DriverV2) ControllerGetVolume(context.Context, *csi.ControllerGetVolume return nil, status.Error(codes.Unimplemented, "") } +// ControllerModifyVolume modify volume +func (d *DriverV2) ControllerModifyVolume(context.Context, *csi.ControllerModifyVolumeRequest) (*csi.ControllerModifyVolumeResponse, error) { + return nil, status.Error(codes.Unimplemented, "") +} + // ControllerPublishVolume attach an azure disk to a required node func (d *DriverV2) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { diskURI := req.GetVolumeId() @@ -309,8 +321,8 @@ func (d *DriverV2) ControllerPublishVolume(ctx context.Context, req *csi.Control return nil, status.Error(codes.InvalidArgument, "MaxShares value not supported") } - if !azureutils.IsValidVolumeCapabilities(caps, maxShares) { - return nil, status.Error(codes.InvalidArgument, "Volume capability not supported") + if err := azureutils.IsValidVolumeCapabilities(caps, maxShares); err != nil { + 
return nil, status.Error(codes.InvalidArgument, err.Error()) } disk, err := d.checkDiskExists(ctx, diskURI) @@ -335,7 +347,7 @@ func (d *DriverV2) ControllerPublishVolume(ctx context.Context, req *csi.Control mc.ObserveOperationWithResult(isOperationSucceeded, consts.VolumeID, diskURI, consts.Node, string(nodeName)) }() - lun, vmState, err := d.cloud.GetDiskLun(diskName, diskURI, nodeName) + lun, vmState, err := d.diskController.GetDiskLun(diskName, diskURI, nodeName) if err == cloudprovider.InstanceNotFound { return nil, status.Error(codes.NotFound, fmt.Sprintf("failed to get azure instance id for node %q (%v)", nodeName, err)) } @@ -350,34 +362,33 @@ func (d *DriverV2) ControllerPublishVolume(ctx context.Context, req *csi.Control if err == nil { if vmState != nil && strings.ToLower(*vmState) == "failed" { klog.Warningf("VM(%s) is in failed state, update VM first", nodeName) - if err := d.cloud.UpdateVM(ctx, nodeName); err != nil { + if err := d.diskController.UpdateVM(ctx, nodeName); err != nil { return nil, status.Errorf(codes.Internal, "update instance %q failed with %v", nodeName, err) } } // Volume is already attached to node. klog.V(2).Infof("Attach operation is successful. volume %s is already attached to node %s at lun %d.", diskURI, nodeName, lun) } else { - if strings.Contains(strings.ToLower(err.Error()), strings.ToLower(consts.TooManyRequests)) || - strings.Contains(strings.ToLower(err.Error()), consts.ClientThrottled) { + if azureutils.IsThrottlingError(err) { return nil, status.Errorf(codes.Internal, err.Error()) } - var cachingMode compute.CachingTypes + var cachingMode armcompute.CachingTypes if cachingMode, err = azureutils.GetCachingMode(volumeContext); err != nil { return nil, status.Errorf(codes.Internal, err.Error()) } klog.V(2).Infof("Trying to attach volume %s to node %s", diskURI, nodeName) - lun, err = d.cloud.AttachDisk(ctx, true, diskName, diskURI, nodeName, cachingMode, disk) + lun, err = d.diskController.AttachDisk(ctx, diskName, diskURI, nodeName, cachingMode, disk, nil) if err == nil { klog.V(2).Infof("Attach operation successful: volume %s attached to node %s.", diskURI, nodeName) } else { if derr, ok := err.(*volerr.DanglingAttachError); ok { klog.Warningf("volume %s is already attached to node %s, try detach first", diskURI, derr.CurrentNode) - if err = d.cloud.DetachDisk(ctx, diskName, diskURI, derr.CurrentNode); err != nil { + if err = d.diskController.DetachDisk(ctx, diskName, diskURI, derr.CurrentNode); err != nil { return nil, status.Errorf(codes.Internal, "Could not detach volume %s from node %s: %v", diskURI, derr.CurrentNode, err) } klog.V(2).Infof("Trying to attach volume %s to node %s again", diskURI, nodeName) - lun, err = d.cloud.AttachDisk(ctx, true, diskName, diskURI, nodeName, cachingMode, disk) + lun, err = d.diskController.AttachDisk(ctx, diskName, diskURI, nodeName, cachingMode, disk, nil) } if err != nil { klog.Errorf("Attach volume %s to instance %s failed with %v", diskURI, nodeName, err) @@ -424,7 +435,7 @@ func (d *DriverV2) ControllerUnpublishVolume(ctx context.Context, req *csi.Contr klog.V(2).Infof("Trying to detach volume %s from node %s", diskURI, nodeID) - if err := d.cloud.DetachDisk(ctx, diskName, diskURI, nodeName); err != nil { + if err := d.diskController.DetachDisk(ctx, diskName, diskURI, nodeName); err != nil { if strings.Contains(err.Error(), consts.ErrDiskNotFound) { klog.Warningf("volume %s already detached from node %s", diskURI, nodeID) } else { @@ -454,8 +465,8 @@ func (d *DriverV2) ValidateVolumeCapabilities(ctx 
context.Context, req *csi.Vali return nil, status.Error(codes.InvalidArgument, "MaxShares value not supported") } - if !azureutils.IsValidVolumeCapabilities(volumeCapabilities, maxShares) { - return &csi.ValidateVolumeCapabilitiesResponse{Message: "VolumeCapabilities are invalid"}, nil + if err := azureutils.IsValidVolumeCapabilities(volumeCapabilities, maxShares); err != nil { + return &csi.ValidateVolumeCapabilitiesResponse{Message: err.Error()}, nil } if _, err := d.checkDiskExists(ctx, diskURI); err != nil { @@ -611,9 +622,10 @@ func (d *DriverV2) listVolumesInNodeResourceGroup(ctx context.Context, start, ma // listVolumesByResourceGroup is a helper function that updates the ListVolumeResponse_Entry slice and returns number of total visited volumes, number of volumes that needs to be visited and an error if found func (d *DriverV2) listVolumesByResourceGroup(ctx context.Context, resourceGroup string, entries []*csi.ListVolumesResponse_Entry, start, maxEntries int, volSet map[string]bool) listVolumeStatus { - disks, derr := d.cloud.DisksClient.ListByResourceGroup(ctx, "", resourceGroup) + diskClient := d.clientFactory.GetDiskClient() + disks, derr := diskClient.List(ctx, resourceGroup) if derr != nil { - return listVolumeStatus{err: status.Errorf(codes.Internal, "ListVolumes on rg(%s) failed with error: %v", resourceGroup, derr.Error())} + return listVolumeStatus{err: status.Errorf(codes.Internal, "ListVolumes on rg(%s) failed with error: %s", resourceGroup, derr.Error())} } // if volSet is initialized but is empty, return if volSet != nil && len(volSet) == 0 { @@ -647,7 +659,7 @@ func (d *DriverV2) listVolumesByResourceGroup(ctx context.Context, resourceGroup continue } // HyperVGeneration property is only setup for os disks. Only the non os disks should be included in the list - if disk.DiskProperties == nil || disk.DiskProperties.HyperVGeneration == "" { + if disk.Properties == nil || disk.Properties.HyperVGeneration == nil || *disk.Properties.HyperVGeneration == "" { nodeList := []string{} if disk.ManagedBy != nil { @@ -711,17 +723,21 @@ func (d *DriverV2) ControllerExpandVolume(ctx context.Context, req *csi.Controll }() subsID := azureutils.GetSubscriptionIDFromURI(diskURI) - result, rerr := d.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) - if rerr != nil { - return nil, status.Errorf(codes.Internal, "could not get the disk(%s) under rg(%s) with error(%v)", diskName, resourceGroup, rerr.Error()) + diskClient, err := d.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return nil, status.Errorf(codes.Internal, "could not get disk client for subscription(%s) with error(%v)", subsID, err) + } + result, err := diskClient.Get(ctx, resourceGroup, diskName) + if err != nil { + return nil, status.Errorf(codes.Internal, "could not get the disk(%s) under rg(%s) with error(%v)", diskName, resourceGroup, err) } - if result.DiskProperties.DiskSizeGB == nil { + if result.Properties.DiskSizeGB == nil { return nil, status.Errorf(codes.Internal, "could not get size of the disk(%s)", diskName) } - oldSize := *resource.NewQuantity(int64(*result.DiskProperties.DiskSizeGB), resource.BinarySI) + oldSize := *resource.NewQuantity(int64(*result.Properties.DiskSizeGB), resource.BinarySI) klog.V(2).Infof("begin to expand azure disk(%s) with new size(%d)", diskURI, requestSize.Value()) - newSize, err := d.cloud.ResizeDisk(ctx, diskURI, oldSize, requestSize, d.enableDiskOnlineResize) + newSize, err := d.diskController.ResizeDisk(ctx, diskURI, oldSize, requestSize, 
d.enableDiskOnlineResize) if err != nil { return nil, status.Errorf(codes.Internal, "failed to resize disk(%s) with error(%v)", diskURI, err) } @@ -801,10 +817,10 @@ func (d *DriverV2) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRe tags[k] = &value } - snapshot := compute.Snapshot{ - SnapshotProperties: &compute.SnapshotProperties{ - CreationData: &compute.CreationData{ - CreateOption: compute.Copy, + snapshot := armcompute.Snapshot{ + Properties: &armcompute.SnapshotProperties{ + CreationData: &armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceResourceID: &sourceVolumeID, }, Incremental: &incremental, @@ -816,7 +832,7 @@ func (d *DriverV2) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRe if err := azureutils.ValidateDataAccessAuthMode(dataAccessAuthMode); err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } - snapshot.SnapshotProperties.DataAccessAuthMode = compute.DataAccessAuthMode(dataAccessAuthMode) + snapshot.Properties.DataAccessAuthMode = to.Ptr(armcompute.DataAccessAuthMode(dataAccessAuthMode)) } mc := metrics.NewMetricContext(consts.AzureDiskCSIDriverName, "controller_create_snapshot", d.cloud.ResourceGroup, d.cloud.SubscriptionID, d.Name) @@ -826,13 +842,19 @@ func (d *DriverV2) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRe }() klog.V(2).Infof("begin to create snapshot(%s, incremental: %v) under rg(%s)", snapshotName, incremental, resourceGroup) - if rerr := d.cloud.SnapshotsClient.CreateOrUpdate(ctx, subsID, resourceGroup, snapshotName, snapshot); rerr != nil { - if strings.Contains(rerr.Error().Error(), "existing disk") { - return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("request snapshot(%s) under rg(%s) already exists, but the SourceVolumeId is different, error details: %v", snapshotName, resourceGroup, rerr.Error())) + snapshotClient, err := d.clientFactory.GetSnapshotClientForSub(subsID) + if err != nil { + return nil, status.Errorf(codes.Internal, "could not get snapshot client for subscription(%s) with error(%v)", subsID, err) + } + if _, err := snapshotClient.CreateOrUpdate(ctx, resourceGroup, snapshotName, snapshot); err != nil { + if strings.Contains(err.Error(), "existing disk") { + return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("request snapshot(%s) under rg(%s) already exists, but the SourceVolumeId is different, error details: %v", snapshotName, resourceGroup, err)) } - - azureutils.SleepIfThrottled(rerr.Error(), consts.SnapshotOpThrottlingSleepSec) - return nil, status.Error(codes.Internal, fmt.Sprintf("create snapshot error: %v", rerr.Error())) + azureutils.SleepIfThrottled(err, consts.SnapshotOpThrottlingSleepSec) + return nil, status.Error(codes.Internal, fmt.Sprintf("create snapshot error: %v", err)) + } + if err := d.waitForSnapshotReady(ctx, subsID, resourceGroup, snapshotName, waitForSnapshotReadyInterval, waitForSnapshotReadyTimeout); err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("waitForSnapshotReady(%s, %s, %s) failed with %v", subsID, resourceGroup, snapshotName, err)) } klog.V(2).Infof("create snapshot(%s) under rg(%s) successfully", snapshotName, resourceGroup) @@ -874,10 +896,14 @@ func (d *DriverV2) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRe }() klog.V(2).Infof("begin to delete snapshot(%s) under rg(%s)", snapshotName, resourceGroup) - rerr := d.cloud.SnapshotsClient.Delete(ctx, subsID, resourceGroup, snapshotName) - if rerr != nil { - azureutils.SleepIfThrottled(rerr.Error(), 
consts.SnapshotOpThrottlingSleepSec) - return nil, status.Error(codes.Internal, fmt.Sprintf("delete snapshot error: %v", rerr.Error())) + snapshotClient, err := d.clientFactory.GetSnapshotClientForSub(subsID) + if err != nil { + return nil, status.Errorf(codes.Internal, "could not get snapshot client for subscription(%s) with error(%v)", subsID, err) + } + err = snapshotClient.Delete(ctx, resourceGroup, snapshotName) + if err != nil { + azureutils.SleepIfThrottled(err, consts.SnapshotOpThrottlingSleepSec) + return nil, status.Error(codes.Internal, fmt.Sprintf("delete snapshot error: %v", err)) } klog.V(2).Infof("delete snapshot(%s) under rg(%s) successfully", snapshotName, resourceGroup) isOperationSucceeded = true @@ -905,9 +931,9 @@ func (d *DriverV2) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequ } return listSnapshotResp, nil } - + snapshotClient := d.clientFactory.GetSnapshotClient() // no SnapshotId is set, return all snapshots that satisfy the request. - snapshots, err := d.cloud.SnapshotsClient.ListByResourceGroup(ctx, "", d.cloud.ResourceGroup) + snapshots, err := snapshotClient.List(ctx, d.cloud.ResourceGroup) if err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown list snapshot error: %v", err.Error())) } @@ -924,40 +950,47 @@ func (d *DriverV2) getSnapshotByID(ctx context.Context, subsID, resourceGroup, s return nil, status.Errorf(codes.Internal, err.Error()) } } - - snapshot, rerr := d.cloud.SnapshotsClient.Get(ctx, subsID, resourceGroup, snapshotName) + snapshotClient, err := d.clientFactory.GetSnapshotClientForSub(subsID) + if err != nil { + return nil, status.Errorf(codes.Internal, "could not get snapshot client for subscription(%s) with error(%v)", subsID, err) + } + snapshot, rerr := snapshotClient.Get(ctx, resourceGroup, snapshotName) if rerr != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("get snapshot %s from rg(%s) error: %v", snapshotName, resourceGroup, rerr.Error())) } - return azureutils.GenerateCSISnapshot(sourceVolumeID, &snapshot) + return azureutils.GenerateCSISnapshot(sourceVolumeID, snapshot) } // GetSourceDiskSize recursively searches for the sourceDisk and returns: sourceDisk disk size, error -func (d *DriverV2) GetSourceDiskSize(ctx context.Context, subsID, resourceGroup, diskName string, curDepth, maxDepth int) (*int32, error) { +func (d *DriverV2) GetSourceDiskSize(ctx context.Context, subsID, resourceGroup, diskName string, curDepth, maxDepth int) (*int32, *armcompute.Disk, error) { if curDepth > maxDepth { - return nil, status.Error(codes.Internal, fmt.Sprintf("current depth (%d) surpassed the max depth (%d) while searching for the source disk size", curDepth, maxDepth)) + return nil, nil, status.Error(codes.Internal, fmt.Sprintf("current depth (%d) surpassed the max depth (%d) while searching for the source disk size", curDepth, maxDepth)) } - result, rerr := d.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) - if rerr != nil { - return nil, rerr.Error() + diskClient, err := d.clientFactory.GetDiskClientForSub(subsID) + if err != nil { + return nil, nil, status.Error(codes.Internal, err.Error()) + } + result, err := diskClient.Get(ctx, resourceGroup, diskName) + if err != nil { + return nil, result, err } - if result.DiskProperties == nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("DiskProperty not found for disk (%s) in resource group (%s)", diskName, resourceGroup)) + if result.Properties == nil { + return nil, result, status.Error(codes.Internal, fmt.Sprintf("DiskProperty 
not found for disk (%s) in resource group (%s)", diskName, resourceGroup)) } - if result.DiskProperties.CreationData != nil && (*result.DiskProperties.CreationData).CreateOption == "Copy" { + if result.Properties.CreationData != nil && result.Properties.CreationData.CreateOption != nil && *result.Properties.CreationData.CreateOption == armcompute.DiskCreateOptionCopy { klog.V(2).Infof("Clone source disk has a parent source") - sourceResourceID := *result.DiskProperties.CreationData.SourceResourceID + sourceResourceID := *result.Properties.CreationData.SourceResourceID parentResourceGroup, _ := azureutils.GetResourceGroupFromURI(sourceResourceID) parentDiskName := path.Base(sourceResourceID) return d.GetSourceDiskSize(ctx, subsID, parentResourceGroup, parentDiskName, curDepth+1, maxDepth) } - if (*result.DiskProperties).DiskSizeGB == nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("DiskSizeGB for disk (%s) in resourcegroup (%s) is nil", diskName, resourceGroup)) + if (*result.Properties).DiskSizeGB == nil { + return nil, result, status.Error(codes.Internal, fmt.Sprintf("DiskSizeGB for disk (%s) in resourcegroup (%s) is nil", diskName, resourceGroup)) } - return (*result.DiskProperties).DiskSizeGB, nil + return (*result.Properties).DiskSizeGB, result, nil } // The format of snapshot id is /subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/snapshot-xxx-xxx. diff --git a/pkg/azuredisk/fake_azuredisk.go b/pkg/azuredisk/fake_azuredisk.go index 5a9b1a84c..2322caf83 100644 --- a/pkg/azuredisk/fake_azuredisk.go +++ b/pkg/azuredisk/fake_azuredisk.go @@ -36,13 +36,13 @@ package azuredisk import ( "context" - "testing" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/fake" "k8s.io/mount-utils" testingexec "k8s.io/utils/exec/testing" @@ -52,6 +52,7 @@ import ( "sigs.k8s.io/azuredisk-csi-driver/pkg/optimization" "sigs.k8s.io/azuredisk-csi-driver/pkg/optimization/mockoptimization" volumehelper "sigs.k8s.io/azuredisk-csi-driver/pkg/util" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" azure "sigs.k8s.io/cloud-provider-azure/pkg/provider" ) @@ -104,7 +105,7 @@ func (s *fakeCryptMapper) GetDevicePath(volumeID string) (string, error) { type FakeDriver interface { CSIDriver - GetSourceDiskSize(ctx context.Context, subsID, resourceGroup, diskName string, curDepth, maxDepth int) (*int32, error) + GetSourceDiskSize(ctx context.Context, subsID, resourceGroup, diskName string, curDepth, maxDepth int) (*int32, *armcompute.Disk, error) setNextCommandOutputScripts(scripts ...testingexec.FakeAction) @@ -116,6 +117,7 @@ type FakeDriver interface { setVersion(version string) getCloud() *azure.Cloud setCloud(*azure.Cloud) + getClientFactory() azclient.ClientFactory getMounter() *mount.SafeFormatAndMount setMounter(*mount.SafeFormatAndMount) setPerfOptimizationEnabled(bool) @@ -123,14 +125,16 @@ type FakeDriver interface { getHostUtil() hostUtil checkDiskCapacity(context.Context, string, string, string, int) (bool, error) - checkDiskExists(ctx context.Context, diskURI string) (*compute.Disk, error) + checkDiskExists(ctx context.Context, diskURI string) (*armcompute.Disk, error) 
getSnapshotInfo(string) (string, string, string, error) - waitForSnapshotCopy(context.Context, string, string, string, time.Duration, time.Duration) error + waitForSnapshotReady(context.Context, string, string, string, time.Duration, time.Duration) error getSnapshotByID(context.Context, string, string, string, string) (*csi.Snapshot, error) ensureMountPoint(string) (bool, error) ensureBlockTargetFile(string) error getDevicePathWithLUN(lunStr string) (string, error) - setDiskThrottlingCache(key string, value string) + setThrottlingCache(key string, value string) + getUsedLunsFromVolumeAttachments(context.Context, string) ([]int, error) + getUsedLunsFromNode(nodeName types.NodeName) ([]int, error) setDiskDeviceName(string) } @@ -139,7 +143,7 @@ type fakeDriverV1 struct { Driver } -func newFakeDriverV1(t *testing.T) (*fakeDriverV1, error) { +func newFakeDriverV1(ctrl *gomock.Controller) (*fakeDriverV1, error) { driver := fakeDriverV1{} driver.Name = fakeDriverName driver.Version = fakeDriverVersion @@ -152,13 +156,18 @@ func newFakeDriverV1(t *testing.T) (*fakeDriverV1, error) { driver.hostUtil = azureutils.NewFakeHostUtil() driver.useCSIProxyGAInterface = true driver.allowEmptyCloudConfig = true + driver.shouldWaitForSnapshotReady = true + driver.endpoint = "tcp://127.0.0.1:0" + driver.disableAVSetNodes = true + driver.kubeClient = fake.NewSimpleClientset() + driver.getVolumeName = func(s string) (string, error) { return s, nil } driver.cryptMapper = &fakeCryptMapper{} - ctrl := gomock.NewController(t) - defer ctrl.Finish() - driver.cloud = azure.GetTestCloud(ctrl) + driver.diskController = NewManagedDiskController(driver.cloud) + driver.clientFactory = driver.cloud.ComputeClientFactory + mounter, err := mounter.NewSafeMounter(driver.enableWindowsHostProcess, driver.useCSIProxyGAInterface) if err != nil { return nil, err @@ -172,7 +181,8 @@ func newFakeDriverV1(t *testing.T) (*fakeDriverV1, error) { if err != nil { return nil, err } - driver.getDiskThrottlingCache = cache + driver.throttlingCache = cache + driver.checkDiskLunThrottlingCache = cache driver.deviceHelper = mockoptimization.NewMockInterface(ctrl) driver.AddControllerServiceCapabilities( @@ -192,11 +202,6 @@ func newFakeDriverV1(t *testing.T) (*fakeDriverV1, error) { csi.NodeServiceCapability_RPC_EXPAND_VOLUME, }) - nodeInfo := driver.getNodeInfo() - assert.NotEqual(t, nil, nodeInfo) - dh := driver.getDeviceHelper() - assert.NotEqual(t, nil, dh) - return &driver, nil } @@ -204,8 +209,11 @@ func (d *fakeDriverV1) setNextCommandOutputScripts(scripts ...testingexec.FakeAc d.mounter.Exec.(*mounter.FakeSafeMounter).SetNextCommandOutputScripts(scripts...) 
} -func (d *fakeDriverV1) setDiskThrottlingCache(key string, value string) { - d.getDiskThrottlingCache.Set(key, value) +func (d *fakeDriverV1) setThrottlingCache(key string, value string) { + d.throttlingCache.Set(key, value) +} +func (d *fakeDriverV1) getClientFactory() azclient.ClientFactory { + return d.clientFactory } func (d *fakeDriverV1) setDiskDeviceName(name string) { diff --git a/pkg/azuredisk/fake_azuredisk_test.go b/pkg/azuredisk/fake_azuredisk_test.go index 9e0f75877..8182b2085 100644 --- a/pkg/azuredisk/fake_azuredisk_test.go +++ b/pkg/azuredisk/fake_azuredisk_test.go @@ -20,10 +20,13 @@ import ( "testing" "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" ) func TestNewFakeDriver(t *testing.T) { - d, err := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, err := NewFakeDriver(cntl) assert.NotNil(t, d) assert.Nil(t, err) } diff --git a/pkg/azuredisk/fake_azuredisk_v1.go b/pkg/azuredisk/fake_azuredisk_v1.go index 3237afd0c..a3fcdea39 100644 --- a/pkg/azuredisk/fake_azuredisk_v1.go +++ b/pkg/azuredisk/fake_azuredisk_v1.go @@ -19,9 +19,11 @@ limitations under the License. package azuredisk -import "testing" +import ( + "go.uber.org/mock/gomock" +) // NewFakeDriver returns a driver implementation suitable for use in unit tests. -func NewFakeDriver(t *testing.T) (FakeDriver, error) { +func NewFakeDriver(t *gomock.Controller) (FakeDriver, error) { return newFakeDriverV1(t) } diff --git a/pkg/azuredisk/fake_azuredisk_v2.go b/pkg/azuredisk/fake_azuredisk_v2.go index dc50f511e..4facb5a42 100644 --- a/pkg/azuredisk/fake_azuredisk_v2.go +++ b/pkg/azuredisk/fake_azuredisk_v2.go @@ -20,11 +20,9 @@ limitations under the License. package azuredisk import ( - "testing" - "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" + "k8s.io/client-go/kubernetes/fake" "k8s.io/klog/v2" testingexec "k8s.io/utils/exec/testing" "sigs.k8s.io/azuredisk-csi-driver/pkg/azureutils" @@ -32,6 +30,7 @@ import ( "sigs.k8s.io/azuredisk-csi-driver/pkg/mounter" "sigs.k8s.io/azuredisk-csi-driver/pkg/optimization/mockoptimization" volumehelper "sigs.k8s.io/azuredisk-csi-driver/pkg/util" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient" azure "sigs.k8s.io/cloud-provider-azure/pkg/provider" ) @@ -40,20 +39,20 @@ type fakeDriverV2 struct { } // NewFakeDriver returns a driver implementation suitable for use in unit tests. 
-func NewFakeDriver(t *testing.T) (FakeDriver, error) { +func NewFakeDriver(ctrl *gomock.Controller) (FakeDriver, error) { var d FakeDriver var err error if !*useDriverV2 { - d, err = newFakeDriverV1(t) + d, err = newFakeDriverV1(ctrl) } else { - d, err = newFakeDriverV2(t) + d, err = newFakeDriverV2(ctrl) } return d, err } -func newFakeDriverV2(t *testing.T) (*fakeDriverV2, error) { +func newFakeDriverV2(ctrl *gomock.Controller) (*fakeDriverV2, error) { klog.Warning("Using DriverV2") driver := fakeDriverV2{} driver.Name = fakeDriverName @@ -67,11 +66,14 @@ func newFakeDriverV2(t *testing.T) (*fakeDriverV2, error) { driver.hostUtil = azureutils.NewFakeHostUtil() driver.useCSIProxyGAInterface = true driver.allowEmptyCloudConfig = true - - ctrl := gomock.NewController(t) - defer ctrl.Finish() + driver.endpoint = "tcp://127.0.0.1:0" + driver.disableAVSetNodes = true + driver.kubeClient = fake.NewSimpleClientset() driver.cloud = azure.GetTestCloud(ctrl) + driver.diskController = NewManagedDiskController(driver.cloud) + driver.clientFactory = driver.cloud.ComputeClientFactory + mounter, err := mounter.NewSafeMounter(driver.enableWindowsHostProcess, driver.useCSIProxyGAInterface) if err != nil { return nil, err @@ -98,17 +100,15 @@ func newFakeDriverV2(t *testing.T) (*fakeDriverV2, error) { csi.NodeServiceCapability_RPC_EXPAND_VOLUME, }) - nodeInfo := driver.getNodeInfo() - assert.NotEqual(t, nil, nodeInfo) - dh := driver.getDeviceHelper() - assert.NotEqual(t, nil, dh) - return &driver, nil } func (d *fakeDriverV2) setNextCommandOutputScripts(scripts ...testingexec.FakeAction) { d.mounter.Exec.(*mounter.FakeSafeMounter).SetNextCommandOutputScripts(scripts...) } +func (d *fakeDriverV2) getClientFactory() azclient.ClientFactory { + return d.clientFactory +} -func (d *DriverV2) setDiskThrottlingCache(key string, value string) { +func (d *DriverV2) setThrottlingCache(key string, value string) { } diff --git a/pkg/azuredisk/identityserver.go b/pkg/azuredisk/identityserver.go index b3e2e9245..1c4b5a78e 100644 --- a/pkg/azuredisk/identityserver.go +++ b/pkg/azuredisk/identityserver.go @@ -28,7 +28,7 @@ import ( ) // GetPluginInfo return the version and name of the plugin -func (f *Driver) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { +func (f *Driver) GetPluginInfo(_ context.Context, _ *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { if f.Name == "" { return nil, status.Error(codes.Unavailable, "Driver name not configured") } @@ -47,12 +47,12 @@ func (f *Driver) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoReques // This method does not need to return anything. // Currently the spec does not dictate what you should return either. 
// Hence, return an empty response -func (f *Driver) Probe(ctx context.Context, req *csi.ProbeRequest) (*csi.ProbeResponse, error) { +func (f *Driver) Probe(_ context.Context, _ *csi.ProbeRequest) (*csi.ProbeResponse, error) { return &csi.ProbeResponse{Ready: &wrappers.BoolValue{Value: true}}, nil } // GetPluginCapabilities returns the capabilities of the plugin -func (f *Driver) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { +func (f *Driver) GetPluginCapabilities(_ context.Context, _ *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { capabilities := []*csi.PluginCapability{ { Type: &csi.PluginCapability_Service_{ diff --git a/pkg/azuredisk/identityserver_test.go b/pkg/azuredisk/identityserver_test.go index 6767367d1..173f4a035 100644 --- a/pkg/azuredisk/identityserver_test.go +++ b/pkg/azuredisk/identityserver_test.go @@ -22,6 +22,7 @@ import ( "github.com/container-storage-interface/spec/lib/go/csi" "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" ) const ( @@ -31,42 +32,53 @@ const ( func TestGetPluginInfo(t *testing.T) { // Check with correct arguments - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + d, _ := NewFakeDriver(cntl) req := csi.GetPluginInfoRequest{} resp, err := d.GetPluginInfo(context.Background(), &req) assert.NoError(t, err) assert.Equal(t, resp.Name, fakeCSIDriverName) assert.Equal(t, resp.GetVendorVersion(), vendorVersion) + cntl.Finish() //Check error when driver name is empty - d, _ = NewFakeDriver(t) + cntl = gomock.NewController(t) + d, _ = NewFakeDriver(cntl) d.setName("") req = csi.GetPluginInfoRequest{} resp, err = d.GetPluginInfo(context.Background(), &req) assert.Error(t, err) assert.Nil(t, resp) + cntl.Finish() //Check error when version is empty - d, _ = NewFakeDriver(t) + cntl = gomock.NewController(t) + d, _ = NewFakeDriver(cntl) d.setVersion("") req = csi.GetPluginInfoRequest{} resp, err = d.GetPluginInfo(context.Background(), &req) assert.Error(t, err) assert.Nil(t, resp) + cntl.Finish() } func TestProbe(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) req := csi.ProbeRequest{} resp, err := d.Probe(context.Background(), &req) assert.NoError(t, err) assert.NotNil(t, resp) assert.Equal(t, resp.XXX_sizecache, int32(0)) assert.Equal(t, resp.Ready.Value, true) + } func TestGetPluginCapabilities(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) req := csi.GetPluginCapabilitiesRequest{} resp, err := d.GetPluginCapabilities(context.Background(), &req) assert.NoError(t, err) diff --git a/pkg/azuredisk/mockcorev1/interface.go b/pkg/azuredisk/mockcorev1/interface.go index d4000c769..7dee079e8 100644 --- a/pkg/azuredisk/mockcorev1/interface.go +++ b/pkg/azuredisk/mockcorev1/interface.go @@ -19,7 +19,7 @@ package mockcorev1 import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" v1 "k8s.io/client-go/kubernetes/typed/core/v1" rest "k8s.io/client-go/rest" ) diff --git a/pkg/azuredisk/mockkubeclient/interface.go b/pkg/azuredisk/mockkubeclient/interface.go index 8649e2877..bc923993c 100644 --- a/pkg/azuredisk/mockkubeclient/interface.go +++ b/pkg/azuredisk/mockkubeclient/interface.go @@ -23,7 +23,7 @@ package mockkubeclient import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" 
discovery "k8s.io/client-go/discovery" v1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" v1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" @@ -54,7 +54,7 @@ import ( v19 "k8s.io/client-go/kubernetes/typed/events/v1" v1beta17 "k8s.io/client-go/kubernetes/typed/events/v1beta1" v1beta18 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" - v1alpha12 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" + flowcontrolv1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1" v1beta19 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" v1beta20 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" v1beta3 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3" @@ -507,18 +507,18 @@ func (mr *MockInterfaceMockRecorder) ExtensionsV1beta1() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExtensionsV1beta1", reflect.TypeOf((*MockInterface)(nil).ExtensionsV1beta1)) } -// FlowcontrolV1alpha1 mocks base method. -func (m *MockInterface) FlowcontrolV1alpha1() v1alpha12.FlowcontrolV1alpha1Interface { +// FlowcontrolV1beta1 mocks base method. +func (m *MockInterface) FlowcontrolV1() flowcontrolv1.FlowcontrolV1Interface { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FlowcontrolV1alpha1") - ret0, _ := ret[0].(v1alpha12.FlowcontrolV1alpha1Interface) + ret := m.ctrl.Call(m, "FlowcontrolV1") + ret0, _ := ret[0].(flowcontrolv1.FlowcontrolV1Interface) return ret0 } -// FlowcontrolV1alpha1 indicates an expected call of FlowcontrolV1alpha1. -func (mr *MockInterfaceMockRecorder) FlowcontrolV1alpha1() *gomock.Call { +// FlowcontrolV1 indicates an expected call of FlowcontrolV1. +func (mr *MockInterfaceMockRecorder) FlowcontrolV1() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlowcontrolV1alpha1", reflect.TypeOf((*MockInterface)(nil).FlowcontrolV1alpha1)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlowcontrolV1", reflect.TypeOf((*MockInterface)(nil).FlowcontrolV1)) } // FlowcontrolV1beta1 mocks base method. 
diff --git a/pkg/azuredisk/mockpersistentvolume/interface.go b/pkg/azuredisk/mockpersistentvolume/interface.go index ca0269216..0a217362f 100644 --- a/pkg/azuredisk/mockpersistentvolume/interface.go +++ b/pkg/azuredisk/mockpersistentvolume/interface.go @@ -20,7 +20,7 @@ import ( context "context" reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" diff --git a/pkg/azuredisk/nodeserver.go b/pkg/azuredisk/nodeserver.go index 4d43f2d55..95280e7c1 100644 --- a/pkg/azuredisk/nodeserver.go +++ b/pkg/azuredisk/nodeserver.go @@ -104,8 +104,8 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe return nil, status.Error(codes.InvalidArgument, "MaxShares value not supported") } - if !azureutils.IsValidVolumeCapabilities([]*csi.VolumeCapability{volumeCapability}, maxShares) { - return nil, status.Error(codes.InvalidArgument, "Volume capability not supported") + if err := azureutils.IsValidVolumeCapabilities([]*csi.VolumeCapability{volumeCapability}, maxShares); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) } if acquired := d.volumeLocks.TryAcquire(diskURI); !acquired { @@ -137,7 +137,7 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe return nil, status.Errorf(codes.Internal, "failed to optimize device performance for target(%s) error(%s)", source, err) } } else { - klog.V(2).Infof("NodeStageVolume: perf optimization is disabled for %s. perfProfile %s accountType %s", source, profile, accountType) + klog.V(6).Infof("NodeStageVolume: perf optimization is disabled for %s. perfProfile %s accountType %s", source, profile, accountType) } } @@ -213,7 +213,7 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe } // NodeUnstageVolume unmount disk device from a staging path -func (d *Driver) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) { +func (d *Driver) NodeUnstageVolume(_ context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) { volumeID := req.GetVolumeId() if len(volumeID) == 0 { return nil, status.Error(codes.InvalidArgument, "Volume ID not provided") @@ -250,7 +250,7 @@ func (d *Driver) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolu } // NodePublishVolume mount the volume from staging to target path -func (d *Driver) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { +func (d *Driver) NodePublishVolume(_ context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { volumeID := req.GetVolumeId() if len(volumeID) == 0 { return nil, status.Error(codes.InvalidArgument, "Volume ID missing in the request") @@ -267,8 +267,8 @@ func (d *Driver) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolu return nil, status.Error(codes.InvalidArgument, "MaxShares value not supported") } - if !azureutils.IsValidVolumeCapabilities([]*csi.VolumeCapability{volumeCapability}, maxShares) { - return nil, status.Error(codes.InvalidArgument, "Volume capability not supported") + if err := azureutils.IsValidVolumeCapabilities([]*csi.VolumeCapability{volumeCapability}, maxShares); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) } source := req.GetStagingTargetPath() @@ -326,7 +326,7 @@ func (d *Driver) 
NodePublishVolume(ctx context.Context, req *csi.NodePublishVolu } // NodeUnpublishVolume unmount the volume from the target path -func (d *Driver) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { +func (d *Driver) NodeUnpublishVolume(_ context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { targetPath := req.GetTargetPath() volumeID := req.GetVolumeId() @@ -349,14 +349,14 @@ func (d *Driver) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublish } // NodeGetCapabilities return the capabilities of the Node plugin -func (d *Driver) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { +func (d *Driver) NodeGetCapabilities(_ context.Context, _ *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { return &csi.NodeGetCapabilitiesResponse{ Capabilities: d.NSCap, }, nil } // NodeGetInfo return info of the node on which this plugin is running -func (d *Driver) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { +func (d *Driver) NodeGetInfo(ctx context.Context, _ *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { topology := &csi.Topology{ Segments: map[string]string{topologyKey: ""}, } @@ -431,7 +431,7 @@ func (d *Driver) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) ( if instanceType == "" { instanceType = instanceTypeFromLabels } - maxDataDiskCount = getMaxDataDiskCount(instanceType) + maxDataDiskCount = getMaxDataDiskCount(instanceType) - d.ReservedDataDiskSlotNum } nodeID := d.NodeID @@ -483,7 +483,10 @@ func (d *Driver) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeS return nil, status.Error(codes.InvalidArgument, "NodeGetVolumeStats volume path was empty") } - volUsage, err := GetVolumeStats(ctx, d.mounter, req.VolumePath, d.hostUtil) + volUsage, err := d.GetVolumeStats(ctx, d.mounter, req.VolumeId, req.VolumePath, d.hostUtil) + if err != nil { + klog.Errorf("NodeGetVolumeStats: failed to get volume stats for volume %s path %s: %v", req.VolumeId, req.VolumePath, err) + } return &csi.NodeGetVolumeStatsResponse{ Usage: volUsage, }, err @@ -528,7 +531,7 @@ func (d *Driver) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolume if isBlock { if d.enableDiskOnlineResize { - klog.V(2).Info("NodeExpandVolume begin to rescan all devices on block volume(%s)", volumeID) + klog.V(2).Infof("NodeExpandVolume begin to rescan all devices on block volume(%s)", volumeID) if err := rescanAllVolumes(d.ioHandler); err != nil { klog.Errorf("NodeExpandVolume rescanAllVolumes failed with error: %v", err) } @@ -537,7 +540,7 @@ func (d *Driver) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolume if _, err := d.cryptMapper.ResizeCryptDevice(ctx, diskName); err != nil { return nil, status.Errorf(codes.Internal, "resizing crypt device: %v", err) } - klog.V(2).Info("NodeExpandVolume skip resize operation on block volume(%s)", volumeID) + klog.V(2).Infof("NodeExpandVolume skip resize operation on block volume(%s)", volumeID) return &csi.NodeExpandVolumeResponse{}, nil } diff --git a/pkg/azuredisk/nodeserver_test.go b/pkg/azuredisk/nodeserver_test.go index 2ded932c6..4cb001300 100644 --- a/pkg/azuredisk/nodeserver_test.go +++ b/pkg/azuredisk/nodeserver_test.go @@ -49,9 +49,9 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" 
"github.com/container-storage-interface/spec/lib/go/csi" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" testingexec "k8s.io/utils/exec/testing" @@ -117,7 +117,9 @@ func TestMain(m *testing.M) { } func TestNodeGetCapabilities(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) capType := &csi.NodeServiceCapability_Rpc{ Rpc: &csi.NodeServiceCapability_RPC{ Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME, @@ -213,14 +215,14 @@ func TestEnsureMountPoint(t *testing.T) { }, } - // Setup - _ = makeDir(alreadyExistTarget) - d, _ := NewFakeDriver(t) - fakeMounter, err := mounter.NewFakeSafeMounter() - assert.NoError(t, err) - d.setMounter(fakeMounter) - for _, test := range tests { + // Setup + cntl := gomock.NewController(t) + _ = makeDir(alreadyExistTarget) + d, _ := NewFakeDriver(cntl) + fakeMounter, err := mounter.NewFakeSafeMounter() + assert.NoError(t, err) + d.setMounter(fakeMounter) if !(runtime.GOOS == "windows" && test.skipOnWindows) && !(runtime.GOOS == "darwin" && test.skipOnDarwin) { mnt, err := d.ensureMountPoint(test.target) if !testutil.AssertError(&test.expectedErr, err) { @@ -230,13 +232,14 @@ func TestEnsureMountPoint(t *testing.T) { assert.Equal(t, test.expectedMnt, mnt) } } + // Clean up + err = os.RemoveAll(alreadyExistTarget) + assert.NoError(t, err) + err = os.RemoveAll(targetTest) + assert.NoError(t, err) + cntl.Finish() } - // Clean up - err = os.RemoveAll(alreadyExistTarget) - assert.NoError(t, err) - err = os.RemoveAll(targetTest) - assert.NoError(t, err) } func TestNodeGetInfo(t *testing.T) { @@ -301,10 +304,12 @@ func TestNodeGetInfo(t *testing.T) { for _, test := range tests { test := test t.Run(test.desc, func(t *testing.T) { + cntl := gomock.NewController(t) + defer cntl.Finish() if test.skipOnDarwin && runtime.GOOS == "darwin" { t.Skip("Skip test case on Darwin") } - d, err := NewFakeDriver(t) + d, err := NewFakeDriver(cntl) require.NoError(t, err) test.setupFunc(t, d) @@ -380,15 +385,15 @@ func TestNodeGetVolumeStats(t *testing.T) { }, } - // Setup - _ = makeDir(fakePath) - _ = makeDir(blockVolumePath) - d, _ := NewFakeDriver(t) - mounter, err := mounter.NewFakeSafeMounter() - assert.NoError(t, err) - d.setMounter(mounter) - for _, test := range tests { + cntl := gomock.NewController(t) + // Setup + _ = makeDir(fakePath) + _ = makeDir(blockVolumePath) + d, _ := NewFakeDriver(cntl) + mounter, err := mounter.NewFakeSafeMounter() + assert.NoError(t, err) + d.setMounter(mounter) if !(test.skipOnDarwin && runtime.GOOS == "darwin") && !(test.skipOnWindows && runtime.GOOS == "windows") { if test.setupFunc != nil { test.setupFunc(t, d) @@ -398,17 +403,19 @@ func TestNodeGetVolumeStats(t *testing.T) { t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr) } } + // Clean up + err = os.RemoveAll(fakePath) + assert.NoError(t, err) + err = os.RemoveAll(blockVolumePath) + assert.NoError(t, err) + cntl.Finish() } - - // Clean up - err = os.RemoveAll(fakePath) - assert.NoError(t, err) - err = os.RemoveAll(blockVolumePath) - assert.NoError(t, err) } func TestNodeStageVolume(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) stdVolCap := &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{ @@ -481,7 +488,7 @@ func 
TestNodeStageVolume(t *testing.T) { { desc: "Volume capabilities not supported", req: csi.NodeStageVolumeRequest{VolumeId: "vol_1", StagingTargetPath: sourceTest, VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCapWrong}}, - expectedErr: status.Error(codes.InvalidArgument, "Volume capability not supported"), + expectedErr: status.Error(codes.InvalidArgument, "invalid access mode: [access_mode: ]"), }, { desc: "MaxShares value not supported", @@ -676,14 +683,14 @@ func TestNodeStageVolume(t *testing.T) { }, } - // Setup - _ = makeDir(sourceTest) - _ = makeDir(targetTest) - fakeMounter, err := mounter.NewFakeSafeMounter() - assert.NoError(t, err) - d.setMounter(fakeMounter) - for _, test := range tests { + + // Setup + _ = makeDir(sourceTest) + _ = makeDir(targetTest) + fakeMounter, err := mounter.NewFakeSafeMounter() + assert.NoError(t, err) + d.setMounter(fakeMounter) if !(test.skipOnDarwin && runtime.GOOS == "darwin") && !(test.skipOnWindows && runtime.GOOS == "windows") { if test.setupFunc != nil { test.setupFunc(t, d) @@ -698,17 +705,19 @@ func TestNodeStageVolume(t *testing.T) { test.cleanupFunc(t, d) } } + // Clean up + err = os.RemoveAll(sourceTest) + assert.NoError(t, err) + err = os.RemoveAll(targetTest) + assert.NoError(t, err) } - // Clean up - err = os.RemoveAll(sourceTest) - assert.NoError(t, err) - err = os.RemoveAll(targetTest) - assert.NoError(t, err) } func TestNodeUnstageVolume(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) errorTarget, err := testutil.GetWorkDirPath("error_is_likely_target") assert.NoError(t, err) targetFile, err := testutil.GetWorkDirPath("abc.go") @@ -795,7 +804,9 @@ func TestNodeUnstageVolume(t *testing.T) { } func TestNodePublishVolume(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) volumeCap := csi.VolumeCapability_AccessMode{Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER} volumeCapWrong := csi.VolumeCapability_AccessMode{Mode: 10} @@ -864,7 +875,7 @@ func TestNodePublishVolume(t *testing.T) { VolumeId: "vol_1", }, expectedErr: testutil.TestError{ - DefaultError: status.Error(codes.InvalidArgument, "Volume capability not supported"), + DefaultError: status.Error(codes.InvalidArgument, "invalid access mode: [block:<> access_mode: ]"), }, }, { @@ -976,7 +987,8 @@ func TestNodePublishVolume(t *testing.T) { } func TestNodeUnpublishVolume(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + d, _ := NewFakeDriver(cntl) errorTarget, err := testutil.GetWorkDirPath("error_is_likely_target") assert.NoError(t, err) targetFile, err := testutil.GetWorkDirPath("abc.go") @@ -1051,7 +1063,9 @@ func TestNodeUnpublishVolume(t *testing.T) { func TestNodeExpandVolume(t *testing.T) { t.Skip("Skipping expansion test relying on external binaries") - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) fakeMounter, err := mounter.NewFakeSafeMounter() assert.NoError(t, err) d.setMounter(fakeMounter) @@ -1238,10 +1252,12 @@ func TestNodeExpandVolume(t *testing.T) { } func TestGetBlockSizeBytes(t *testing.T) { + cntl := gomock.NewController(t) + defer cntl.Finish() if runtime.GOOS == "windows" { t.Skip("Skipping test on Windows") } - d, _ := NewFakeDriver(t) + d, _ := NewFakeDriver(cntl) testTarget, err := testutil.GetWorkDirPath("test") assert.NoError(t, err) @@ -1291,6 +1307,8 @@ func 
TestGetBlockSizeBytes(t *testing.T) { } func TestEnsureBlockTargetFile(t *testing.T) { + cntl := gomock.NewController(t) + defer cntl.Finish() // sip this test because `util/mount` not supported // on darwin if runtime.GOOS == "darwin" { @@ -1300,7 +1318,7 @@ func TestEnsureBlockTargetFile(t *testing.T) { assert.NoError(t, err) testPath, err := testutil.GetWorkDirPath(fmt.Sprintf("test%ctest", os.PathSeparator)) assert.NoError(t, err) - d, err := NewFakeDriver(t) + d, err := NewFakeDriver(cntl) assert.NoError(t, err) tests := []struct { @@ -1360,7 +1378,9 @@ func TestMakeDir(t *testing.T) { } func TestGetDevicePathWithLUN(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) tests := []struct { desc string req string @@ -1381,7 +1401,9 @@ func TestGetDevicePathWithLUN(t *testing.T) { } func TestGetDevicePathWithMountPath(t *testing.T) { - d, _ := NewFakeDriver(t) + cntl := gomock.NewController(t) + defer cntl.Finish() + d, _ := NewFakeDriver(cntl) err := "exit status 1" if runtime.GOOS == "darwin" { @@ -1422,6 +1444,8 @@ func TestGetDevicePathWithMountPath(t *testing.T) { } func TestNodePublishVolumeIdempotentMount(t *testing.T) { + cntl := gomock.NewController(t) + defer cntl.Finish() if runtime.GOOS == "windows" || os.Getuid() != 0 { return } @@ -1432,7 +1456,7 @@ func TestNodePublishVolumeIdempotentMount(t *testing.T) { } _ = makeDir(sourceTest) _ = makeDir(targetTest) - d, _ := NewFakeDriver(t) + d, _ := NewFakeDriver(cntl) volumeCap := csi.VolumeCapability_AccessMode{Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER} req := csi.NodePublishVolumeRequest{ diff --git a/pkg/azuredisk/nodeserver_v2.go b/pkg/azuredisk/nodeserver_v2.go index e2e60d05b..d21f20524 100644 --- a/pkg/azuredisk/nodeserver_v2.go +++ b/pkg/azuredisk/nodeserver_v2.go @@ -67,8 +67,8 @@ func (d *DriverV2) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolume return nil, status.Error(codes.InvalidArgument, "MaxShares value not supported") } - if !azureutils.IsValidVolumeCapabilities([]*csi.VolumeCapability{volumeCapability}, maxShares) { - return nil, status.Error(codes.InvalidArgument, "Volume capability not supported") + if err := azureutils.IsValidVolumeCapabilities([]*csi.VolumeCapability{volumeCapability}, maxShares); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) } if acquired := d.volumeLocks.TryAcquire(diskURI); !acquired { @@ -213,8 +213,8 @@ func (d *DriverV2) NodePublishVolume(ctx context.Context, req *csi.NodePublishVo return nil, status.Error(codes.InvalidArgument, "MaxShares value not supported") } - if !azureutils.IsValidVolumeCapabilities([]*csi.VolumeCapability{volumeCapability}, maxShares) { - return nil, status.Error(codes.InvalidArgument, "Volume capability not supported") + if err := azureutils.IsValidVolumeCapabilities([]*csi.VolumeCapability{volumeCapability}, maxShares); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) } source := req.GetStagingTargetPath() @@ -405,7 +405,7 @@ func (d *DriverV2) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolum return nil, status.Error(codes.InvalidArgument, "NodeGetVolumeStats volume path was empty") } - volUsage, err := GetVolumeStats(ctx, d.mounter, req.VolumePath, d.hostUtil) + volUsage, err := d.GetVolumeStats(ctx, d.mounter, req.VolumeId, req.VolumePath, d.hostUtil) return &csi.NodeGetVolumeStatsResponse{ Usage: volUsage, }, err @@ -439,12 +439,12 @@ func (d *DriverV2) NodeExpandVolume(ctx 
context.Context, req *csi.NodeExpandVolu if isBlock { if d.enableDiskOnlineResize { - klog.V(2).Info("NodeExpandVolume begin to rescan all devices on block volume(%s)", volumeID) + klog.V(2).Infof("NodeExpandVolume begin to rescan all devices on block volume(%s)", volumeID) if err := rescanAllVolumes(d.ioHandler); err != nil { klog.Errorf("NodeExpandVolume rescanAllVolumes failed with error: %v", err) } } - klog.V(2).Info("NodeExpandVolume skip resize operation on block volume(%s)", volumeID) + klog.V(2).Infof("NodeExpandVolume skip resize operation on block volume(%s)", volumeID) return &csi.NodeExpandVolumeResponse{}, nil } @@ -459,7 +459,7 @@ func (d *DriverV2) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolu } if d.enableDiskOnlineResize { - klog.V(2).Info("NodeExpandVolume begin to rescan device %s on volume(%s)", devicePath, volumeID) + klog.V(2).Infof("NodeExpandVolume begin to rescan device %s on volume(%s)", devicePath, volumeID) if err := rescanVolume(d.ioHandler, devicePath); err != nil { klog.Errorf("NodeExpandVolume rescanVolume failed with error: %v", err) } diff --git a/pkg/azuredisk/util.go b/pkg/azuredisk/util.go new file mode 100644 index 000000000..67645e7a5 --- /dev/null +++ b/pkg/azuredisk/util.go @@ -0,0 +1,57 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azuredisk + +import "sync" + +// lockMap used to lock on entries +type lockMap struct { + sync.Mutex + mutexMap map[string]*sync.Mutex +} + +// NewLockMap returns a new lock map +func newLockMap() *lockMap { + return &lockMap{ + mutexMap: make(map[string]*sync.Mutex), + } +} + +// LockEntry acquires a lock associated with the specific entry +func (lm *lockMap) LockEntry(entry string) { + lm.Lock() + // check if entry does not exists, then add entry + mutex, exists := lm.mutexMap[entry] + if !exists { + mutex = &sync.Mutex{} + lm.mutexMap[entry] = mutex + } + lm.Unlock() + mutex.Lock() +} + +// UnlockEntry release the lock associated with the specific entry +func (lm *lockMap) UnlockEntry(entry string) { + lm.Lock() + defer lm.Unlock() + + mutex, exists := lm.mutexMap[entry] + if !exists { + return + } + mutex.Unlock() +} diff --git a/pkg/azurediskplugin/Dockerfile b/pkg/azurediskplugin/Dockerfile index 03f60f857..278092ff2 100644 --- a/pkg/azurediskplugin/Dockerfile +++ b/pkg/azurediskplugin/Dockerfile @@ -30,7 +30,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM registry.k8s.io/build-image/debian-base:bullseye-v1.4.3 +FROM registry.k8s.io/build-image/debian-base:bookworm-v1.0.3 RUN apt update && apt upgrade -y && apt-mark unhold libcap2 && clean-install util-linux e2fsprogs mount ca-certificates udev xfsprogs btrfs-progs libcryptsetup-dev diff --git a/pkg/azurediskplugin/WindowsHostProcess.Dockerfile b/pkg/azurediskplugin/WindowsHostProcess.Dockerfile index 16a202d6c..eefd360d5 100644 --- a/pkg/azurediskplugin/WindowsHostProcess.Dockerfile +++ b/pkg/azurediskplugin/WindowsHostProcess.Dockerfile @@ -1,3 +1,4 @@ +#https://github.com/microsoft/windows-host-process-containers-base-image FROM mcr.microsoft.com/oss/kubernetes/windows-host-process-containers-base-image:v1.0.0 LABEL description="CSI Azure disk plugin" diff --git a/pkg/azurediskplugin/main.go b/pkg/azurediskplugin/main.go index c22ef25d2..31d92cb3d 100644 --- a/pkg/azurediskplugin/main.go +++ b/pkg/azurediskplugin/main.go @@ -43,56 +43,28 @@ import ( "os" "strings" - "sigs.k8s.io/azuredisk-csi-driver/pkg/azuredisk" - "k8s.io/component-base/metrics/legacyregistry" "k8s.io/klog/v2" - consts "sigs.k8s.io/azuredisk-csi-driver/pkg/azureconstants" + "sigs.k8s.io/azuredisk-csi-driver/pkg/azuredisk" ) func init() { klog.InitFlags(nil) + driverOptions.AddFlags().VisitAll(func(f *flag.Flag) { + flag.CommandLine.Var(f.Value, f.Name, f.Usage) + }) } var ( - kmsAddr = flag.String("kms-addr", "kms-service.kube-system:9000", "Address of Constellation's KMS. Used to request keys (default: kms.kube-system:9000") - endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint") - nodeID = flag.String("nodeid", "", "node id") - version = flag.Bool("version", false, "Print the version and exit.") - metricsAddress = flag.String("metrics-address", "", "export the metrics") - kubeconfig = flag.String("kubeconfig", "", "Absolute path to the kubeconfig file. Required only when running out of cluster.") - driverName = flag.String("drivername", consts.DefaultDriverName, "name of the driver") - volumeAttachLimit = flag.Int64("volume-attach-limit", -1, "maximum number of attachable volumes per node") - supportZone = flag.Bool("support-zone", true, "boolean flag to get zone info in NodeGetInfo") - getNodeInfoFromLabels = flag.Bool("get-node-info-from-labels", false, "boolean flag to get zone info from node labels in NodeGetInfo") - getNodeIDFromIMDS = flag.Bool("get-nodeid-from-imds", false, "boolean flag to get NodeID from IMDS") - disableAVSetNodes = flag.Bool("disable-avset-nodes", false, "disable DisableAvailabilitySetNodes in cloud config for controller") - vmType = flag.String("vm-type", "", "type of agent node. 
available values: vmss, standard") - enablePerfOptimization = flag.Bool("enable-perf-optimization", false, "boolean flag to enable disk perf optimization") - cloudConfigSecretName = flag.String("cloud-config-secret-name", "azure-cloud-provider", "cloud config secret name") - cloudConfigSecretNamespace = flag.String("cloud-config-secret-namespace", "kube-system", "cloud config secret namespace") - customUserAgent = flag.String("custom-user-agent", "", "custom userAgent") - userAgentSuffix = flag.String("user-agent-suffix", "", "userAgent suffix") - useCSIProxyGAInterface = flag.Bool("use-csiproxy-ga-interface", true, "boolean flag to enable csi-proxy GA interface on Windows") - enableDiskOnlineResize = flag.Bool("enable-disk-online-resize", true, "boolean flag to enable disk online resize") - allowEmptyCloudConfig = flag.Bool("allow-empty-cloud-config", true, "Whether allow running driver without cloud config") - enableAsyncAttach = flag.Bool("enable-async-attach", false, "boolean flag to enable async attach") - enableListVolumes = flag.Bool("enable-list-volumes", false, "boolean flag to enable ListVolumes on controller") - enableListSnapshots = flag.Bool("enable-list-snapshots", false, "boolean flag to enable ListSnapshots on controller") - enableDiskCapacityCheck = flag.Bool("enable-disk-capacity-check", false, "boolean flag to enable volume capacity check in CreateVolume") - disableUpdateCache = flag.Bool("disable-update-cache", false, "boolean flag to disable update cache during disk attach/detach") - vmssCacheTTLInSeconds = flag.Int64("vmss-cache-ttl-seconds", -1, "vmss cache TTL in seconds (600 by default)") - attachDetachInitialDelayInMs = flag.Int64("attach-detach-initial-delay-ms", 1000, "initial delay in milliseconds for batch disk attach/detach") - enableTrafficManager = flag.Bool("enable-traffic-manager", false, "boolean flag to enable traffic manager") - trafficManagerPort = flag.Int64("traffic-manager-port", 7788, "default traffic manager port") - enableWindowsHostProcess = flag.Bool("enable-windows-host-process", false, "enable windows host process") - enableOtelTracing = flag.Bool("enable-otel-tracing", false, "If set, enable opentelemetry tracing for the driver. The tracing is disabled by default. 
Configure the exporter endpoint with OTEL_EXPORTER_OTLP_ENDPOINT and other env variables, see https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/#general-sdk-configuration.") + version = flag.Bool("version", false, "Print the version and exit.") + metricsAddress = flag.String("metrics-address", "", "export the metrics") + driverOptions azuredisk.DriverOptions ) func main() { flag.Parse() if *version { - info, err := azuredisk.GetVersionYAML(*driverName) + info, err := azuredisk.GetVersionYAML(driverOptions.DriverName) if err != nil { klog.Fatalln(err) } @@ -100,66 +72,19 @@ func main() { os.Exit(0) } - if *nodeID == "" { - // nodeid is not needed in controller component - klog.Warning("nodeid is empty") - } - exportMetrics() handle() os.Exit(0) } func handle() { - // Start tracing as soon as possible - if *enableOtelTracing { - exporter, err := azuredisk.InitOtelTracing() - if err != nil { - klog.Fatalf("Failed to initialize otel tracing: %v", err) - } - // Exporter will flush traces on shutdown - defer func() { - if err := exporter.Shutdown(context.Background()); err != nil { - klog.Errorf("Could not shutdown otel exporter: %v", err) - } - }() - } - - driverOptions := azuredisk.DriverOptions{ - NodeID: *nodeID, - DriverName: *driverName, - VolumeAttachLimit: *volumeAttachLimit, - EnablePerfOptimization: *enablePerfOptimization, - CloudConfigSecretName: *cloudConfigSecretName, - CloudConfigSecretNamespace: *cloudConfigSecretNamespace, - CustomUserAgent: *customUserAgent, - UserAgentSuffix: *userAgentSuffix, - UseCSIProxyGAInterface: *useCSIProxyGAInterface, - EnableDiskOnlineResize: *enableDiskOnlineResize, - AllowEmptyCloudConfig: *allowEmptyCloudConfig, - EnableTrafficManager: *enableTrafficManager, - TrafficManagerPort: *trafficManagerPort, - EnableAsyncAttach: *enableAsyncAttach, - EnableListVolumes: *enableListVolumes, - EnableListSnapshots: *enableListSnapshots, - SupportZone: *supportZone, - GetNodeInfoFromLabels: *getNodeInfoFromLabels, - EnableDiskCapacityCheck: *enableDiskCapacityCheck, - DisableUpdateCache: *disableUpdateCache, - AttachDetachInitialDelayInMs: *attachDetachInitialDelayInMs, - VMSSCacheTTLInSeconds: *vmssCacheTTLInSeconds, - VMType: *vmType, - EnableWindowsHostProcess: *enableWindowsHostProcess, - GetNodeIDFromIMDS: *getNodeIDFromIMDS, - EnableOtelTracing: *enableOtelTracing, - KMSAddr: *kmsAddr, - } driver := azuredisk.NewDriver(&driverOptions) if driver == nil { klog.Fatalln("Failed to initialize azuredisk CSI Driver") } - testingMock := false - driver.Run(*endpoint, *kubeconfig, *disableAVSetNodes, testingMock) + if err := driver.Run(context.Background()); err != nil { + klog.Fatalf("Failed to run azuredisk CSI Driver: %v", err) + } } func exportMetrics() { @@ -174,7 +99,7 @@ func exportMetrics() { serve(context.Background(), l, serveMetrics) } -func serve(ctx context.Context, l net.Listener, serveFunc func(net.Listener) error) { +func serve(_ context.Context, l net.Listener, serveFunc func(net.Listener) error) { path := l.Addr().String() klog.V(2).Infof("set up prometheus server on %v", path) go func() { diff --git a/pkg/azureutils/azure_disk_utils.go b/pkg/azureutils/azure_disk_utils.go index 5a3c04f5d..7a8e75e9b 100644 --- a/pkg/azureutils/azure_disk_utils.go +++ b/pkg/azureutils/azure_disk_utils.go @@ -36,7 +36,6 @@ package azureutils import ( "context" - "errors" "fmt" "os" "os/exec" @@ -48,23 +47,23 @@ import ( "time" "unicode" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + 
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/pborman/uuid" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/rest" + "k8s.io/apimachinery/pkg/util/uuid" + clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" - "k8s.io/mount-utils" - "k8s.io/utils/pointer" - - clientset "k8s.io/client-go/kubernetes" api "k8s.io/kubernetes/pkg/apis/core" volumeUtil "k8s.io/kubernetes/pkg/volume/util" + "k8s.io/mount-utils" + "k8s.io/utils/pointer" consts "sigs.k8s.io/azuredisk-csi-driver/pkg/azureconstants" "sigs.k8s.io/azuredisk-csi-driver/pkg/optimization" "sigs.k8s.io/azuredisk-csi-driver/pkg/util" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader" azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients" azure "sigs.k8s.io/cloud-provider-azure/pkg/provider" ) @@ -72,8 +71,8 @@ import ( const ( azurePublicCloud = "AZUREPUBLICCLOUD" azureStackCloud = "AZURESTACKCLOUD" - azurePublicCloudDefaultStorageAccountType = compute.StandardSSDLRS - azureStackCloudDefaultStorageAccountType = compute.StandardLRS + azurePublicCloudDefaultStorageAccountType = armcompute.DiskStorageAccountTypesStandardSSDLRS + azureStackCloudDefaultStorageAccountType = armcompute.DiskStorageAccountTypesStandardLRS defaultAzureDataDiskCachingMode = v1.AzureDataDiskCachingReadOnly // default IOPS Caps & Throughput Cap (MBps) per https://docs.microsoft.com/en-us/azure/virtual-machines/linux/disks-ultra-ssd // see https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#uri-parameters @@ -134,7 +133,6 @@ type ManagedDiskParameters struct { DiskIOPSReadWrite string DiskMBPSReadWrite string DiskName string - EnableAsyncAttach *bool EnableBursting *bool PerformancePlus *bool FsType string @@ -153,7 +151,7 @@ type ManagedDiskParameters struct { Zoned string } -func GetCachingMode(attributes map[string]string) (compute.CachingTypes, error) { +func GetCachingMode(attributes map[string]string) (armcompute.CachingTypes, error) { var ( cachingMode v1.AzureDataDiskCachingMode err error @@ -167,7 +165,7 @@ func GetCachingMode(attributes map[string]string) (compute.CachingTypes, error) } cachingMode, err = NormalizeCachingMode(cachingMode) - return compute.CachingTypes(cachingMode), err + return armcompute.CachingTypes(cachingMode), err } // GetAttachDiskInitialDelay gttachDiskInitialDelay from attributes @@ -185,32 +183,33 @@ func GetAttachDiskInitialDelay(attributes map[string]string) int { } // GetCloudProviderFromClient get Azure Cloud Provider -func GetCloudProviderFromClient(ctx context.Context, kubeClient *clientset.Clientset, secretName, secretNamespace, userAgent string, +func GetCloudProviderFromClient(ctx context.Context, kubeClient clientset.Interface, secretName, secretNamespace, userAgent string, allowEmptyCloudConfig bool, enableTrafficMgr bool, trafficMgrPort int64) (*azure.Cloud, error) { var config *azure.Config var fromSecret bool var err error - az := &azure.Cloud{ - InitSecretConfig: azure.InitSecretConfig{ - SecretName: secretName, - SecretNamespace: secretNamespace, - CloudConfigKey: "azure.json", - }, - } + az := &azure.Cloud{} if kubeClient != nil { - klog.V(2).Infof("reading cloud config from secret %s/%s", az.SecretNamespace, az.SecretName) - az.KubeClient = kubeClient - config, err = az.GetConfigFromSecret() + klog.V(2).Infof("reading cloud config from secret %s/%s", 
secretNamespace, secretName) + config, err = configloader.Load[azure.Config](ctx, &configloader.K8sSecretLoaderConfig{ + K8sSecretConfig: configloader.K8sSecretConfig{ + SecretName: secretName, + SecretNamespace: secretNamespace, + CloudConfigKey: "cloud-config", + }, + KubeClient: kubeClient, + }, nil) + if err != nil { + klog.V(2).Infof("InitializeCloudFromSecret: failed to get cloud config from secret %s/%s: %v", secretNamespace, secretName, err) + } if err == nil && config != nil { fromSecret = true } - if err != nil { - klog.V(2).Infof("InitializeCloudFromSecret: failed to get cloud config from secret %s/%s: %v", az.SecretNamespace, az.SecretName, err) - } + az.KubeClient = kubeClient } if config == nil { - klog.V(2).Infof("could not read cloud config from secret %s/%s", az.SecretNamespace, az.SecretName) + klog.V(2).Infof("could not read cloud config from secret %s/%s", secretNamespace, secretName) credFile, ok := os.LookupEnv(consts.DefaultAzureCredentialFileEnv) if ok && strings.TrimSpace(credFile) != "" { klog.V(2).Infof("%s env var set as %v", consts.DefaultAzureCredentialFileEnv, credFile) @@ -222,16 +221,9 @@ func GetCloudProviderFromClient(ctx context.Context, kubeClient *clientset.Clien } klog.V(2).Infof("use default %s env var: %v", consts.DefaultAzureCredentialFileEnv, credFile) } - - credFileConfig, err := os.Open(credFile) + config, err = configloader.Load[azure.Config](ctx, nil, &configloader.FileLoaderConfig{FilePath: credFile}) if err != nil { klog.Warningf("load azure config from file(%s) failed with %v", credFile, err) - } else { - defer credFileConfig.Close() - klog.V(2).Infof("read cloud config from file: %s successfully", credFile) - if config, err = azure.ParseConfig(credFileConfig); err != nil { - klog.Warningf("parse config file(%s) failed with error: %v", credFile, err) - } } } @@ -243,7 +235,7 @@ func GetCloudProviderFromClient(ctx context.Context, kubeClient *clientset.Clien } } else { // Location may be either upper case with spaces (e.g. "East US") or lower case without spaces (e.g. "eastus") - // Kubernetes does not allow whitespaces or upper case characters in label values, e.g. for topology keys + // Kubernetes does not allow whitespaces in label values, e.g. 
for topology keys // ensure Kubernetes compatible format for Location by enforcing lowercase-no-space format config.Location = strings.ToLower(strings.ReplaceAll(config.Location, " ", "")) @@ -260,6 +252,17 @@ func GetCloudProviderFromClient(ctx context.Context, kubeClient *clientset.Clien klog.V(2).Infof("set ResourceManagerEndpoint as %s", trafficMgrAddr) config.ResourceManagerEndpoint = trafficMgrAddr } + // these environment variables are injected by workload identity webhook + if tenantID := os.Getenv("AZURE_TENANT_ID"); tenantID != "" { + config.TenantID = tenantID + } + if clientID := os.Getenv("AZURE_CLIENT_ID"); clientID != "" { + config.AADClientID = clientID + } + if federatedTokenFile := os.Getenv("AZURE_FEDERATED_TOKEN_FILE"); federatedTokenFile != "" { + config.AADFederatedTokenFile = federatedTokenFile + config.UseFederatedWorkloadIdentityExtension = true + } if err = az.InitializeCloudFromConfig(ctx, config, fromSecret, false); err != nil { klog.Warningf("InitializeCloudFromConfig failed with error: %v", err) } @@ -272,36 +275,8 @@ func GetCloudProviderFromClient(ctx context.Context, kubeClient *clientset.Clien return az, nil } -// GetCloudProviderFromConfig get Azure Cloud Provider -func GetCloudProvider(ctx context.Context, kubeConfig, secretName, secretNamespace, userAgent string, - allowEmptyCloudConfig, enableTrafficMgr bool, trafficMgrPort int64) (*azure.Cloud, error) { - kubeClient, err := GetKubeClient(kubeConfig) - if err != nil { - klog.Warningf("get kubeconfig(%s) failed with error: %v", kubeConfig, err) - if !os.IsNotExist(err) && !errors.Is(err, rest.ErrNotInCluster) { - return nil, fmt.Errorf("failed to get KubeClient: %v", err) - } - } - return GetCloudProviderFromClient(ctx, kubeClient, secretName, secretNamespace, userAgent, - allowEmptyCloudConfig, enableTrafficMgr, trafficMgrPort) -} - -// GetKubeConfig gets config object from config file -func GetKubeConfig(kubeconfig string) (config *rest.Config, err error) { - if kubeconfig != "" { - if config, err = clientcmd.BuildConfigFromFlags("", kubeconfig); err != nil { - return nil, err - } - } else { - if config, err = rest.InClusterConfig(); err != nil { - return nil, err - } - } - return config, err -} - -func GetKubeClient(kubeconfig string) (*clientset.Clientset, error) { - config, err := GetKubeConfig(kubeconfig) +func GetKubeClient(kubeconfig string) (clientset.Interface, error) { + config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) if err != nil { return nil, err } @@ -356,7 +331,7 @@ func CreateValidDiskName(volumeName string) string { } if !checkDiskName(diskName) || len(diskName) < diskNameMinLength { // todo: get cluster name - diskName = volumeUtil.GenerateVolumeName("pvc-disk", uuid.NewUUID().String(), diskNameGenerateMaxLength) + diskName = volumeUtil.GenerateVolumeName("pvc-disk", string(uuid.NewUUID()), diskNameGenerateMaxLength) klog.Warningf("the requested volume name (%q) is invalid, so it is regenerated as (%q)", volumeName, diskName) } @@ -408,10 +383,10 @@ func GetSubscriptionIDFromURI(diskURI string) string { return "" } -func GetValidCreationData(subscriptionID, resourceGroup, sourceResourceID, sourceType string) (compute.CreationData, error) { +func GetValidCreationData(subscriptionID, resourceGroup, sourceResourceID, sourceType string) (armcompute.CreationData, error) { if sourceResourceID == "" { - return compute.CreationData{ - CreateOption: compute.Empty, + return armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), }, nil } @@ -426,21 
+401,21 @@ func GetValidCreationData(subscriptionID, resourceGroup, sourceResourceID, sourc sourceResourceID = fmt.Sprintf(consts.ManagedDiskPath, subscriptionID, resourceGroup, sourceResourceID) } default: - return compute.CreationData{ - CreateOption: compute.Empty, + return armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), }, nil } splits := strings.Split(sourceResourceID, "/") if len(splits) > 9 { if sourceType == consts.SourceSnapshot { - return compute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, diskSnapshotPathRE) + return armcompute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, diskSnapshotPathRE) } - return compute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, consts.ManagedDiskPathRE) + return armcompute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, consts.ManagedDiskPathRE) } - return compute.CreationData{ - CreateOption: compute.Copy, + return armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceResourceID: &sourceResourceID, }, nil } @@ -477,31 +452,35 @@ func IsValidDiskURI(diskURI string) error { return nil } -func IsValidVolumeCapabilities(volCaps []*csi.VolumeCapability, maxShares int) bool { +// IsValidVolumeCapabilities checks whether the volume capabilities are valid +func IsValidVolumeCapabilities(volCaps []*csi.VolumeCapability, maxShares int) error { if ok := IsValidAccessModes(volCaps); !ok { - return false + return fmt.Errorf("invalid access mode: %v", volCaps) } for _, c := range volCaps { blockVolume := c.GetBlock() mountVolume := c.GetMount() accessMode := c.GetAccessMode().GetMode() - if (blockVolume == nil && mountVolume == nil) || - (blockVolume != nil && mountVolume != nil) { - return false + if blockVolume == nil && mountVolume == nil { + return fmt.Errorf("blockVolume and mountVolume are both nil") + } + + if blockVolume != nil && mountVolume != nil { + return fmt.Errorf("blockVolume and mountVolume are both not nil") } if mountVolume != nil && (accessMode == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER || accessMode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY || accessMode == csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER) { - return false + return fmt.Errorf("mountVolume is not supported for access mode: %s", accessMode.String()) } if maxShares < 2 && (accessMode == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER || accessMode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY || accessMode == csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER) { - return false + return fmt.Errorf("access mode: %s is not supported for non-shared disk", accessMode.String()) } } - return true + return nil } func IsValidAccessModes(volCaps []*csi.VolumeCapability) bool { @@ -535,33 +514,33 @@ func NormalizeCachingMode(cachingMode v1.AzureDataDiskCachingMode) (v1.AzureData return cachingMode, nil } -func NormalizeNetworkAccessPolicy(networkAccessPolicy string) (compute.NetworkAccessPolicy, error) { +func NormalizeNetworkAccessPolicy(networkAccessPolicy string) (armcompute.NetworkAccessPolicy, error) { if networkAccessPolicy == "" { - return compute.NetworkAccessPolicy(networkAccessPolicy), nil + return armcompute.NetworkAccessPolicy(networkAccessPolicy), nil } - policy := compute.NetworkAccessPolicy(networkAccessPolicy) - for _, s := range 
compute.PossibleNetworkAccessPolicyValues() { + policy := armcompute.NetworkAccessPolicy(networkAccessPolicy) + for _, s := range armcompute.PossibleNetworkAccessPolicyValues() { if policy == s { return policy, nil } } - return "", fmt.Errorf("azureDisk - %s is not supported NetworkAccessPolicy. Supported values are %s", networkAccessPolicy, compute.PossibleNetworkAccessPolicyValues()) + return "", fmt.Errorf("azureDisk - %s is not supported NetworkAccessPolicy. Supported values are %s", networkAccessPolicy, armcompute.PossibleNetworkAccessPolicyValues()) } -func NormalizePublicNetworkAccess(publicNetworkAccess string) (compute.PublicNetworkAccess, error) { +func NormalizePublicNetworkAccess(publicNetworkAccess string) (armcompute.PublicNetworkAccess, error) { if publicNetworkAccess == "" { - return compute.PublicNetworkAccess(publicNetworkAccess), nil + return armcompute.PublicNetworkAccess(publicNetworkAccess), nil } - access := compute.PublicNetworkAccess(publicNetworkAccess) - for _, s := range compute.PossiblePublicNetworkAccessValues() { + access := armcompute.PublicNetworkAccess(publicNetworkAccess) + for _, s := range armcompute.PossiblePublicNetworkAccessValues() { if access == s { return access, nil } } - return "", fmt.Errorf("azureDisk - %s is not supported PublicNetworkAccess. Supported values are %s", publicNetworkAccess, compute.PossiblePublicNetworkAccessValues()) + return "", fmt.Errorf("azureDisk - %s is not supported PublicNetworkAccess. Supported values are %s", publicNetworkAccess, armcompute.PossiblePublicNetworkAccessValues()) } -func NormalizeStorageAccountType(storageAccountType, cloud string, disableAzureStackCloud bool) (compute.DiskStorageAccountTypes, error) { +func NormalizeStorageAccountType(storageAccountType, cloud string, disableAzureStackCloud bool) (armcompute.DiskStorageAccountTypes, error) { if storageAccountType == "" { if IsAzureStackCloud(cloud, disableAzureStackCloud) { return azureStackCloudDefaultStorageAccountType, nil @@ -569,10 +548,10 @@ func NormalizeStorageAccountType(storageAccountType, cloud string, disableAzureS return azurePublicCloudDefaultStorageAccountType, nil } - sku := compute.DiskStorageAccountTypes(storageAccountType) - supportedSkuNames := compute.PossibleDiskStorageAccountTypesValues() + sku := armcompute.DiskStorageAccountTypes(storageAccountType) + supportedSkuNames := armcompute.PossibleDiskStorageAccountTypesValues() if IsAzureStackCloud(cloud, disableAzureStackCloud) { - supportedSkuNames = []compute.DiskStorageAccountTypes{compute.StandardLRS, compute.PremiumLRS} + supportedSkuNames = []armcompute.DiskStorageAccountTypes{armcompute.DiskStorageAccountTypesStandardLRS, armcompute.DiskStorageAccountTypesPremiumLRS} } for _, s := range supportedSkuNames { if sku == s { @@ -587,7 +566,7 @@ func ValidateDiskEncryptionType(encryptionType string) error { if encryptionType == "" { return nil } - supportedTypes := compute.PossibleEncryptionTypeValues() + supportedTypes := armcompute.PossibleEncryptionTypeValues() for _, s := range supportedTypes { if encryptionType == string(s) { return nil @@ -600,7 +579,7 @@ func ValidateDataAccessAuthMode(dataAccessAuthMode string) error { if dataAccessAuthMode == "" { return nil } - supportedModes := compute.PossibleDataAccessAuthModeValues() + supportedModes := armcompute.PossibleDataAccessAuthModeValues() for _, s := range supportedModes { if dataAccessAuthMode == string(s) { return nil @@ -696,7 +675,7 @@ func ParseDiskParameters(parameters map[string]string) (ManagedDiskParameters, e case 
consts.UserAgentField: diskParams.UserAgent = v case consts.EnableAsyncAttachField: - diskParams.VolumeContext[consts.EnableAsyncAttachField] = v + // no op, only for backward compatibility case consts.ZonedField: // no op, only for backward compatibility with in-tree driver case consts.PerformancePlusField: @@ -720,9 +699,9 @@ func ParseDiskParameters(parameters map[string]string) (ManagedDiskParameters, e } } - if strings.EqualFold(diskParams.AccountType, string(compute.PremiumV2LRS)) { + if strings.EqualFold(diskParams.AccountType, string(armcompute.DiskStorageAccountTypesPremiumV2LRS)) { if diskParams.CachingMode != "" && !strings.EqualFold(string(diskParams.CachingMode), string(v1.AzureDataDiskCachingNone)) { - return diskParams, fmt.Errorf("cachingMode %s is not supported for %s", diskParams.CachingMode, compute.PremiumV2LRS) + return diskParams, fmt.Errorf("cachingMode %s is not supported for %s", diskParams.CachingMode, armcompute.DiskStorageAccountTypesPremiumV2LRS) } } @@ -776,18 +755,18 @@ func checkDiskName(diskName string) bool { return true } -// InsertDiskProperties: insert disk properties to map -func InsertDiskProperties(disk *compute.Disk, publishConext map[string]string) { +// InsertProperties: insert disk properties to map +func InsertDiskProperties(disk *armcompute.Disk, publishConext map[string]string) { if disk == nil || publishConext == nil { return } - if disk.Sku != nil { - publishConext[consts.SkuNameField] = string(disk.Sku.Name) + if disk.SKU != nil { + publishConext[consts.SkuNameField] = string(*disk.SKU.Name) } - prop := disk.DiskProperties + prop := disk.Properties if prop != nil { - publishConext[consts.NetworkAccessPolicyField] = string(prop.NetworkAccessPolicy) + publishConext[consts.NetworkAccessPolicyField] = string(*prop.NetworkAccessPolicy) if prop.DiskIOPSReadWrite != nil { publishConext[consts.DiskIOPSReadWriteField] = strconv.Itoa(int(*prop.DiskIOPSReadWrite)) } @@ -807,11 +786,41 @@ func InsertDiskProperties(disk *compute.Disk, publishConext map[string]string) { } } -func SleepIfThrottled(err error, sleepSec int) { - if err != nil && strings.Contains(strings.ToLower(err.Error()), strings.ToLower(consts.TooManyRequests)) || strings.Contains(strings.ToLower(err.Error()), consts.ClientThrottled) { - klog.Warningf("sleep %d more seconds, waiting for throttling complete", sleepSec) - time.Sleep(time.Duration(sleepSec) * time.Second) +func SleepIfThrottled(err error, defaultSleepSec int) { + if err != nil && IsThrottlingError(err) { + retryAfter := getRetryAfterSeconds(err) + if retryAfter == 0 { + retryAfter = defaultSleepSec + } + klog.Warningf("sleep %d more seconds, waiting for throttling complete", retryAfter) + time.Sleep(time.Duration(retryAfter) * time.Second) + } +} + +func IsThrottlingError(err error) bool { + if err != nil { + errMsg := strings.ToLower(err.Error()) + return strings.Contains(errMsg, strings.ToLower(consts.TooManyRequests)) || strings.Contains(errMsg, consts.ClientThrottled) + } + return false +} + +// getRetryAfterSeconds returns the number of seconds to wait from the error message +func getRetryAfterSeconds(err error) int { + if err == nil { + return 0 + } + re := regexp.MustCompile(`RetryAfter: (\d+)s`) + match := re.FindStringSubmatch(err.Error()) + if len(match) > 1 { + if retryAfter, err := strconv.Atoi(match[1]); err == nil { + if retryAfter > consts.MaxThrottlingSleepSec { + return consts.MaxThrottlingSleepSec + } + return retryAfter + } } + return 0 } // SetKeyValueInMap set key/value pair in map diff --git 
a/pkg/azureutils/azure_disk_utils_test.go b/pkg/azureutils/azure_disk_utils_test.go index 9e51136ed..1cc945c36 100644 --- a/pkg/azureutils/azure_disk_utils_test.go +++ b/pkg/azureutils/azure_disk_utils_test.go @@ -22,12 +22,13 @@ import ( "fmt" "os" "reflect" - "runtime" + "regexp" "strings" "testing" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -131,42 +132,42 @@ func TestCheckDiskName(t *testing.T) { func TestGetCachingMode(t *testing.T) { tests := []struct { options map[string]string - expectedCachingMode compute.CachingTypes + expectedCachingMode armcompute.CachingTypes expectedError bool }{ { nil, - compute.CachingTypes(defaultAzureDataDiskCachingMode), + armcompute.CachingTypes(defaultAzureDataDiskCachingMode), false, }, { map[string]string{}, - compute.CachingTypes(defaultAzureDataDiskCachingMode), + armcompute.CachingTypes(defaultAzureDataDiskCachingMode), false, }, { map[string]string{consts.CachingModeField: ""}, - compute.CachingTypes(defaultAzureDataDiskCachingMode), + armcompute.CachingTypes(defaultAzureDataDiskCachingMode), false, }, { map[string]string{consts.CachingModeField: "None"}, - compute.CachingTypes("None"), + armcompute.CachingTypes("None"), false, }, { map[string]string{consts.CachingModeField: "ReadOnly"}, - compute.CachingTypes("ReadOnly"), + armcompute.CachingTypes("ReadOnly"), false, }, { map[string]string{consts.CachingModeField: "ReadWrite"}, - compute.CachingTypes("ReadWrite"), + armcompute.CachingTypes("ReadWrite"), false, }, { map[string]string{consts.CachingModeField: "WriteOnly"}, - compute.CachingTypes(""), + armcompute.CachingTypes(""), true, }, } @@ -179,93 +180,8 @@ func TestGetCachingMode(t *testing.T) { } } -func TestGetKubeConfig(t *testing.T) { - // skip for now as this is very flaky on Windows - skipIfTestingOnWindows(t) - emptyKubeConfig := "empty-Kube-Config" - validKubeConfig := "valid-Kube-Config" - fakeContent := ` -apiVersion: v1 -clusters: -- cluster: - server: https://localhost:8080 - name: foo-cluster -contexts: -- context: - cluster: foo-cluster - user: foo-user - namespace: bar - name: foo-context -current-context: foo-context -kind: Config -users: -- name: foo-user - user: - exec: - apiVersion: client.authentication.k8s.io/v1beta1 - args: - - arg-1 - - arg-2 - command: foo-command -` - - err := createTestFile(emptyKubeConfig) - if err != nil { - t.Error(err) - } - defer func() { - if err := os.Remove(emptyKubeConfig); err != nil { - t.Error(err) - } - }() - - err = createTestFile(validKubeConfig) - if err != nil { - t.Error(err) - } - defer func() { - if err := os.Remove(validKubeConfig); err != nil { - t.Error(err) - } - }() - - if err := os.WriteFile(validKubeConfig, []byte(fakeContent), 0666); err != nil { - t.Error(err) - } - - tests := []struct { - desc string - kubeconfig string - expectError bool - envVariableHasConfig bool - envVariableConfigIsValid bool - }{ - { - desc: "[success] valid kube config passed", - kubeconfig: validKubeConfig, - expectError: false, - envVariableHasConfig: false, - envVariableConfigIsValid: false, - }, - { - desc: "[failure] invalid kube config passed", - kubeconfig: emptyKubeConfig, - expectError: true, - envVariableHasConfig: false, - envVariableConfigIsValid: false, - }, - } - - for _, test 
:= range tests { - _, err := GetKubeConfig(test.kubeconfig) - receiveError := (err != nil) - if test.expectError != receiveError { - t.Errorf("desc: %s,\n input: %q, GetCloudProvider err: %v, expectErr: %v", test.desc, test.kubeconfig, err, test.expectError) - } - } -} - func TestGetCloudProvider(t *testing.T) { + locationRxp := regexp.MustCompile("(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?") fakeCredFile, err := testutil.GetWorkDirPath("fake-cred-file.json") if err != nil { t.Errorf("GetWorkDirPath failed with %v", err) @@ -317,6 +233,7 @@ users: desc string createFakeCredFile bool createFakeKubeConfig bool + credFile string kubeconfig string userAgent string allowEmptyCloudConfig bool @@ -335,7 +252,7 @@ users: desc: "[failure] out of cluster & in cluster, specify a empty kubeconfig, no credential file", kubeconfig: emptyKubeConfig, allowEmptyCloudConfig: true, - expectedErr: fmt.Errorf("failed to get KubeClient: invalid configuration: no configuration has been provided, try setting KUBERNETES_MASTER environment variable"), + expectedErr: fmt.Errorf("invalid configuration: no configuration has been provided, try setting KUBERNETES_MASTER environment variable"), }, { desc: "[success] out of cluster & in cluster, no kubeconfig, a fake credential file", @@ -352,6 +269,15 @@ users: allowEmptyCloudConfig: true, expectedErr: nil, }, + { + desc: "[success] out of cluster & in cluster, no kubeconfig, a fake credential file with upper case Location format", + createFakeCredFile: true, + kubeconfig: "", + credFile: "location: \"East US\"\n", + userAgent: "useragent", + allowEmptyCloudConfig: false, + expectedErr: nil, + }, } for _, test := range tests { @@ -363,6 +289,10 @@ users: os.Remove(fakeCredFile) }() + if err := os.WriteFile(fakeCredFile, []byte(test.credFile), 0666); err != nil { + t.Error(err) + } + t.Setenv(consts.DefaultAzureCredentialFileEnv, fakeCredFile) } if test.createFakeKubeConfig { @@ -377,11 +307,19 @@ users: t.Error(err) } } - cloud, err := GetCloudProvider(context.Background(), test.kubeconfig, "", "", test.userAgent, test.allowEmptyCloudConfig, false, -1) - if !reflect.DeepEqual(err, test.expectedErr) && !strings.Contains(err.Error(), test.expectedErr.Error()) { + + kubeClient, err := GetKubeClient(test.kubeconfig) + if err != nil { + if ((err == nil) == (test.expectedErr == nil)) && !reflect.DeepEqual(err, test.expectedErr) && !strings.Contains(err.Error(), test.expectedErr.Error()) { + t.Errorf("desc: %s,\n input: %q, GetCloudProvider err: %v, expectedErr: %v", test.desc, test.kubeconfig, err, test.expectedErr) + } + } + cloud, err := GetCloudProviderFromClient(context.Background(), kubeClient, "", "", test.userAgent, test.allowEmptyCloudConfig, false, -1) + if ((err == nil) == (test.expectedErr == nil)) && !reflect.DeepEqual(err, test.expectedErr) && !strings.Contains(err.Error(), test.expectedErr.Error()) { t.Errorf("desc: %s,\n input: %q, GetCloudProvider err: %v, expectedErr: %v", test.desc, test.kubeconfig, err, test.expectedErr) } if cloud != nil { + assert.Regexp(t, locationRxp, cloud.Location) assert.Equal(t, cloud.UserAgent, test.userAgent) assert.Equal(t, cloud.DiskRateLimit != nil && cloud.DiskRateLimit.CloudProviderRateLimit, false) assert.Equal(t, cloud.SnapshotRateLimit != nil && cloud.SnapshotRateLimit.CloudProviderRateLimit, false) @@ -631,13 +569,13 @@ func TestGetSourceVolumeID(t *testing.T) { SourceResourceID := "test" tests := []struct { - snapshot *compute.Snapshot + snapshot *armcompute.Snapshot expected string }{ { - snapshot: &compute.Snapshot{ - 
SnapshotProperties: &compute.SnapshotProperties{ - CreationData: &compute.CreationData{ + snapshot: &armcompute.Snapshot{ + Properties: &armcompute.SnapshotProperties{ + CreationData: &armcompute.CreationData{ SourceResourceID: &SourceResourceID, }, }, @@ -645,21 +583,21 @@ func TestGetSourceVolumeID(t *testing.T) { expected: "test", }, { - snapshot: &compute.Snapshot{ - SnapshotProperties: &compute.SnapshotProperties{ - CreationData: &compute.CreationData{}, + snapshot: &armcompute.Snapshot{ + Properties: &armcompute.SnapshotProperties{ + CreationData: &armcompute.CreationData{}, }, }, expected: "", }, { - snapshot: &compute.Snapshot{ - SnapshotProperties: &compute.SnapshotProperties{}, + snapshot: &armcompute.Snapshot{ + Properties: &armcompute.SnapshotProperties{}, }, expected: "", }, { - snapshot: &compute.Snapshot{}, + snapshot: &armcompute.Snapshot{}, expected: "", }, { @@ -685,7 +623,7 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup string sourceResourceID string sourceType string - expected1 compute.CreationData + expected1 armcompute.CreationData expected2 error }{ { @@ -693,8 +631,8 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "", sourceType: "", - expected1: compute.CreationData{ - CreateOption: compute.Empty, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), }, expected2: nil, }, @@ -703,8 +641,8 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx", sourceType: consts.SourceSnapshot, - expected1: compute.CreationData{ - CreateOption: compute.Copy, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceResourceID: &sourceResourceSnapshotID, }, expected2: nil, @@ -714,8 +652,8 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "xxx", sourceResourceID: "xxx", sourceType: consts.SourceSnapshot, - expected1: compute.CreationData{ - CreateOption: compute.Copy, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceResourceID: &sourceResourceSnapshotID, }, expected2: nil, @@ -725,7 +663,7 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "/subscriptions/23/providers/Microsoft.Compute/disks/name", sourceType: consts.SourceSnapshot, - expected1: compute.CreationData{}, + expected1: armcompute.CreationData{}, expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/snapshots//subscriptions/23/providers/Microsoft.Compute/disks/name", diskSnapshotPathRE), }, { @@ -733,7 +671,7 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "http://test.com/vhds/name", sourceType: consts.SourceSnapshot, - expected1: compute.CreationData{}, + expected1: armcompute.CreationData{}, expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/snapshots/http://test.com/vhds/name", diskSnapshotPathRE), }, { @@ -741,7 +679,7 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "/subscriptions/xxx/snapshots/xxx", sourceType: consts.SourceSnapshot, - expected1: compute.CreationData{}, + expected1: armcompute.CreationData{}, expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", 
"/subscriptions//resourceGroups//providers/Microsoft.Compute/snapshots//subscriptions/xxx/snapshots/xxx", diskSnapshotPathRE), }, { @@ -749,7 +687,7 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx/snapshots/xxx/snapshots/xxx", sourceType: consts.SourceSnapshot, - expected1: compute.CreationData{}, + expected1: armcompute.CreationData{}, expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx/snapshots/xxx/snapshots/xxx", diskSnapshotPathRE), }, { @@ -757,8 +695,8 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "xxx", sourceType: "", - expected1: compute.CreationData{ - CreateOption: compute.Empty, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), }, expected2: nil, }, @@ -767,8 +705,8 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/disks/xxx", sourceType: consts.SourceVolume, - expected1: compute.CreationData{ - CreateOption: compute.Copy, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceResourceID: &sourceResourceVolumeID, }, expected2: nil, @@ -778,8 +716,8 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "xxx", sourceResourceID: "xxx", sourceType: consts.SourceVolume, - expected1: compute.CreationData{ - CreateOption: compute.Copy, + expected1: armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), SourceResourceID: &sourceResourceVolumeID, }, expected2: nil, @@ -789,7 +727,7 @@ func TestGetValidCreationData(t *testing.T) { resourceGroup: "", sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx", sourceType: consts.SourceVolume, - expected1: compute.CreationData{}, + expected1: armcompute.CreationData{}, expected2: fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/disks//subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx", consts.ManagedDiskPathRE), }, } @@ -1006,7 +944,7 @@ func TestIsValidVolumeCapabilities(t *testing.T) { description string volCaps []*csi.VolumeCapability maxShares int - expectedResult bool + expectedResult error }{ { description: "[Success] Returns true for valid mount capabilities", @@ -1021,7 +959,7 @@ func TestIsValidVolumeCapabilities(t *testing.T) { }, }, maxShares: 1, - expectedResult: true, + expectedResult: nil, }, { description: "[Failure] Returns false for unsupported mount access mode", @@ -1036,7 +974,7 @@ func TestIsValidVolumeCapabilities(t *testing.T) { }, }, maxShares: 2, - expectedResult: false, + expectedResult: fmt.Errorf("mountVolume is not supported for access mode: MULTI_NODE_MULTI_WRITER"), }, { description: "[Failure] Returns false for invalid mount access mode", @@ -1051,7 +989,7 @@ func TestIsValidVolumeCapabilities(t *testing.T) { }, }, maxShares: 1, - expectedResult: false, + expectedResult: fmt.Errorf("invalid access mode: [mount:<> access_mode: ]"), }, { description: "[Success] Returns true for valid block capabilities", @@ -1066,7 +1004,7 @@ func TestIsValidVolumeCapabilities(t *testing.T) { }, }, maxShares: 1, - expectedResult: true, + expectedResult: nil, }, { description: "[Success] Returns 
true for shared block access mode", @@ -1081,7 +1019,7 @@ func TestIsValidVolumeCapabilities(t *testing.T) { }, }, maxShares: 2, - expectedResult: true, + expectedResult: nil, }, { description: "[Failure] Returns false for unsupported mount access mode", @@ -1096,7 +1034,7 @@ func TestIsValidVolumeCapabilities(t *testing.T) { }, }, maxShares: 1, - expectedResult: false, + expectedResult: fmt.Errorf("access mode: MULTI_NODE_MULTI_WRITER is not supported for non-shared disk"), }, { description: "[Failure] Returns false for invalid block access mode", @@ -1111,7 +1049,7 @@ func TestIsValidVolumeCapabilities(t *testing.T) { }, }, maxShares: 1, - expectedResult: false, + expectedResult: fmt.Errorf("invalid access mode: [block:<> access_mode: ]"), }, { description: "[Failure] Returns false for empty volume capability", @@ -1122,7 +1060,7 @@ func TestIsValidVolumeCapabilities(t *testing.T) { }, }, maxShares: 1, - expectedResult: false, + expectedResult: fmt.Errorf("invalid access mode: []"), }, } @@ -1143,8 +1081,8 @@ func TestIsValidVolumeCapabilities(t *testing.T) { }, } caps = append(caps, &stdVolCap) - if !IsValidVolumeCapabilities(caps, 1) { - t.Errorf("Unexpected error") + if err := IsValidVolumeCapabilities(caps, 1); err != nil { + t.Errorf("Unexpected error: %v", err) } stdVolCap1 := csi.VolumeCapability{ AccessMode: &csi.VolumeCapability_AccessMode{ @@ -1152,8 +1090,8 @@ func TestIsValidVolumeCapabilities(t *testing.T) { }, } caps = append(caps, &stdVolCap1) - if IsValidVolumeCapabilities(caps, 1) { - t.Errorf("Unexpected error") + if err := IsValidVolumeCapabilities(caps, 1); err == nil { + t.Errorf("Unexpected success") } } @@ -1257,37 +1195,37 @@ func TestValidateDataAccessAuthMode(t *testing.T) { func TestNormalizeNetworkAccessPolicy(t *testing.T) { tests := []struct { networkAccessPolicy string - expectedNetworkAccessPolicy compute.NetworkAccessPolicy + expectedNetworkAccessPolicy armcompute.NetworkAccessPolicy expectError bool }{ { networkAccessPolicy: "", - expectedNetworkAccessPolicy: compute.NetworkAccessPolicy(""), + expectedNetworkAccessPolicy: armcompute.NetworkAccessPolicy(""), expectError: false, }, { networkAccessPolicy: "AllowAll", - expectedNetworkAccessPolicy: compute.AllowAll, + expectedNetworkAccessPolicy: armcompute.NetworkAccessPolicyAllowAll, expectError: false, }, { networkAccessPolicy: "DenyAll", - expectedNetworkAccessPolicy: compute.DenyAll, + expectedNetworkAccessPolicy: armcompute.NetworkAccessPolicyDenyAll, expectError: false, }, { networkAccessPolicy: "AllowPrivate", - expectedNetworkAccessPolicy: compute.AllowPrivate, + expectedNetworkAccessPolicy: armcompute.NetworkAccessPolicyAllowPrivate, expectError: false, }, { networkAccessPolicy: "allowAll", - expectedNetworkAccessPolicy: compute.NetworkAccessPolicy(""), + expectedNetworkAccessPolicy: armcompute.NetworkAccessPolicy(""), expectError: true, }, { networkAccessPolicy: "invalid", - expectedNetworkAccessPolicy: compute.NetworkAccessPolicy(""), + expectedNetworkAccessPolicy: armcompute.NetworkAccessPolicy(""), expectError: true, }, } @@ -1302,32 +1240,32 @@ func TestNormalizeNetworkAccessPolicy(t *testing.T) { func TestNormalizePublicNetworkAccess(t *testing.T) { tests := []struct { publicNetworkAccess string - expectedPublicNetworkAccess compute.PublicNetworkAccess + expectedPublicNetworkAccess armcompute.PublicNetworkAccess expectError bool }{ { publicNetworkAccess: "", - expectedPublicNetworkAccess: compute.PublicNetworkAccess(""), + expectedPublicNetworkAccess: armcompute.PublicNetworkAccess(""), 
expectError: false, }, { publicNetworkAccess: "Enabled", - expectedPublicNetworkAccess: compute.Enabled, + expectedPublicNetworkAccess: armcompute.PublicNetworkAccessEnabled, expectError: false, }, { publicNetworkAccess: "Disabled", - expectedPublicNetworkAccess: compute.Disabled, + expectedPublicNetworkAccess: armcompute.PublicNetworkAccessDisabled, expectError: false, }, { publicNetworkAccess: "enabled", - expectedPublicNetworkAccess: compute.PublicNetworkAccess(""), + expectedPublicNetworkAccess: armcompute.PublicNetworkAccess(""), expectError: true, }, { publicNetworkAccess: "disabled", - expectedPublicNetworkAccess: compute.PublicNetworkAccess(""), + expectedPublicNetworkAccess: armcompute.PublicNetworkAccess(""), expectError: true, }, } @@ -1344,21 +1282,21 @@ func TestNormalizeStorageAccountType(t *testing.T) { cloud string storageAccountType string disableAzureStackCloud bool - expectedAccountType compute.DiskStorageAccountTypes + expectedAccountType armcompute.DiskStorageAccountTypes expectError bool }{ { cloud: azurePublicCloud, storageAccountType: "", disableAzureStackCloud: false, - expectedAccountType: compute.StandardSSDLRS, + expectedAccountType: armcompute.DiskStorageAccountTypesStandardSSDLRS, expectError: false, }, { cloud: azureStackCloud, storageAccountType: "", disableAzureStackCloud: false, - expectedAccountType: compute.StandardLRS, + expectedAccountType: armcompute.DiskStorageAccountTypesStandardLRS, expectError: false, }, { @@ -1372,28 +1310,28 @@ func TestNormalizeStorageAccountType(t *testing.T) { cloud: azurePublicCloud, storageAccountType: "Standard_LRS", disableAzureStackCloud: false, - expectedAccountType: compute.StandardLRS, + expectedAccountType: armcompute.DiskStorageAccountTypesStandardLRS, expectError: false, }, { cloud: azurePublicCloud, storageAccountType: "Premium_LRS", disableAzureStackCloud: false, - expectedAccountType: compute.PremiumLRS, + expectedAccountType: armcompute.DiskStorageAccountTypesPremiumLRS, expectError: false, }, { cloud: azurePublicCloud, storageAccountType: "StandardSSD_LRS", disableAzureStackCloud: false, - expectedAccountType: compute.StandardSSDLRS, + expectedAccountType: armcompute.DiskStorageAccountTypesStandardSSDLRS, expectError: false, }, { cloud: azurePublicCloud, storageAccountType: "UltraSSD_LRS", disableAzureStackCloud: false, - expectedAccountType: compute.UltraSSDLRS, + expectedAccountType: armcompute.DiskStorageAccountTypesUltraSSDLRS, expectError: false, }, { @@ -1407,7 +1345,7 @@ func TestNormalizeStorageAccountType(t *testing.T) { cloud: azureStackCloud, storageAccountType: "UltraSSD_LRS", disableAzureStackCloud: true, - expectedAccountType: compute.UltraSSDLRS, + expectedAccountType: armcompute.DiskStorageAccountTypesUltraSSDLRS, expectError: false, }, } @@ -1737,16 +1675,10 @@ func createTestFile(path string) error { return nil } -func skipIfTestingOnWindows(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("Skipping tests on Windows") - } -} - func TestInsertDiskProperties(t *testing.T) { tests := []struct { desc string - disk *compute.Disk + disk *armcompute.Disk inputMap map[string]string expectedMap map[string]string }{ @@ -1755,37 +1687,37 @@ func TestInsertDiskProperties(t *testing.T) { }, { desc: "empty", - disk: &compute.Disk{}, + disk: &armcompute.Disk{}, inputMap: map[string]string{}, expectedMap: map[string]string{}, }, { desc: "skuName", - disk: &compute.Disk{ - Sku: &compute.DiskSku{Name: compute.PremiumLRS}, + disk: &armcompute.Disk{ + SKU: &armcompute.DiskSKU{Name: 
to.Ptr(armcompute.DiskStorageAccountTypesPremiumLRS)}, }, inputMap: map[string]string{}, - expectedMap: map[string]string{"skuname": string(compute.PremiumLRS)}, + expectedMap: map[string]string{"skuname": string(armcompute.DiskStorageAccountTypesPremiumLRS)}, }, { desc: "DiskProperties", - disk: &compute.Disk{ - Sku: &compute.DiskSku{Name: compute.StandardSSDLRS}, - DiskProperties: &compute.DiskProperties{ - NetworkAccessPolicy: compute.AllowPrivate, + disk: &armcompute.Disk{ + SKU: &armcompute.DiskSKU{Name: to.Ptr(armcompute.DiskStorageAccountTypesStandardSSDLRS)}, + Properties: &armcompute.DiskProperties{ + NetworkAccessPolicy: to.Ptr(armcompute.NetworkAccessPolicyAllowPrivate), DiskIOPSReadWrite: pointer.Int64(6400), DiskMBpsReadWrite: pointer.Int64(100), - CreationData: &compute.CreationData{ + CreationData: &armcompute.CreationData{ LogicalSectorSize: pointer.Int32(512), }, - Encryption: &compute.Encryption{DiskEncryptionSetID: pointer.String("/subs/DiskEncryptionSetID")}, + Encryption: &armcompute.Encryption{DiskEncryptionSetID: pointer.String("/subs/DiskEncryptionSetID")}, MaxShares: pointer.Int32(3), }, }, inputMap: map[string]string{}, expectedMap: map[string]string{ - consts.SkuNameField: string(compute.StandardSSDLRS), - consts.NetworkAccessPolicyField: string(compute.AllowPrivate), + consts.SkuNameField: string(armcompute.DiskStorageAccountTypesStandardSSDLRS), + consts.NetworkAccessPolicyField: string(armcompute.NetworkAccessPolicyAllowPrivate), consts.DiskIOPSReadWriteField: "6400", consts.DiskMBPSReadWriteField: "100", consts.LogicalSectorSizeField: "512", @@ -1928,3 +1860,87 @@ func TestGetAttachDiskInitialDelay(t *testing.T) { } } } + +func TestGetRetryAfterSeconds(t *testing.T) { + tests := []struct { + desc string + err error + expected int + }{ + { + desc: "nil error", + err: nil, + expected: 0, + }, + { + desc: "no match", + err: errors.New("no match"), + expected: 0, + }, + { + desc: "match", + err: errors.New("RetryAfter: 10s"), + expected: 10, + }, + { + desc: "match error message", + err: errors.New("could not list storage accounts for account type Premium_LRS: Retriable: true, RetryAfter: 217s, HTTPStatusCode: 0, RawError: azure cloud provider throttled for operation StorageAccountListByResourceGroup with reason \"client throttled\""), + expected: 217, + }, + { + desc: "match error message exceeds 1200s", + err: errors.New("could not list storage accounts for account type Premium_LRS: Retriable: true, RetryAfter: 2170s, HTTPStatusCode: 0, RawError: azure cloud provider throttled for operation StorageAccountListByResourceGroup with reason \"client throttled\""), + expected: consts.MaxThrottlingSleepSec, + }, + } + + for _, test := range tests { + result := getRetryAfterSeconds(test.err) + if result != test.expected { + t.Errorf("desc: (%s), input: err(%v), getRetryAfterSeconds returned with int(%d), not equal to expected(%d)", + test.desc, test.err, result, test.expected) + } + } +} + +func TestIsThrottlingError(t *testing.T) { + tests := []struct { + desc string + err error + expected bool + }{ + { + desc: "nil error", + err: nil, + expected: false, + }, + { + desc: "no match", + err: errors.New("no match"), + expected: false, + }, + { + desc: "match error message", + err: errors.New("could not list storage accounts for account type Premium_LRS: Retriable: true, RetryAfter: 217s, HTTPStatusCode: 0, RawError: azure cloud provider throttled for operation StorageAccountListByResourceGroup with reason \"client throttled\""), + expected: true, + }, + { + desc: "match 
error message exceeds 1200s", + err: errors.New("could not list storage accounts for account type Premium_LRS: Retriable: true, RetryAfter: 2170s, HTTPStatusCode: 0, RawError: azure cloud provider throttled for operation StorageAccountListByResourceGroup with reason \"client throttled\""), + expected: true, + }, + { + desc: "match error message with TooManyRequests throttling", + err: errors.New("could not list storage accounts for account type Premium_LRS: Retriable: true, RetryAfter: 2170s, HTTPStatusCode: 429, RawError: azure cloud provider throttled for operation StorageAccountListByResourceGroup with reason \"TooManyRequests\""), + expected: true, + }, + } + + for _, test := range tests { + result := IsThrottlingError(test.err) + if result != test.expected { + t.Errorf("desc: (%s), input: err(%v), IsThrottlingError returned with bool(%t), not equal to expected(%t)", + test.desc, test.err, result, test.expected) + } + } +} diff --git a/pkg/azureutils/azure_snapshot_utils.go b/pkg/azureutils/azure_snapshot_utils.go index fba3c2fc4..7456a89b8 100644 --- a/pkg/azureutils/azure_snapshot_utils.go +++ b/pkg/azureutils/azure_snapshot_utils.go @@ -21,7 +21,7 @@ import ( "strconv" "strings" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/container-storage-interface/spec/lib/go/csi" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -29,29 +29,29 @@ import ( volumehelper "sigs.k8s.io/azuredisk-csi-driver/pkg/util" ) -func GenerateCSISnapshot(sourceVolumeID string, snapshot *compute.Snapshot) (*csi.Snapshot, error) { - if snapshot == nil || snapshot.SnapshotProperties == nil { +func GenerateCSISnapshot(sourceVolumeID string, snapshot *armcompute.Snapshot) (*csi.Snapshot, error) { + if snapshot == nil || snapshot.Properties == nil { return nil, fmt.Errorf("snapshot property is nil") } - if snapshot.SnapshotProperties.TimeCreated == nil { + if snapshot.Properties.TimeCreated == nil { return nil, fmt.Errorf("TimeCreated of snapshot property is nil") } - if snapshot.SnapshotProperties.DiskSizeGB == nil { + if snapshot.Properties.DiskSizeGB == nil { return nil, fmt.Errorf("diskSizeGB of snapshot property is nil") } - ready, _ := isCSISnapshotReady(*snapshot.SnapshotProperties.ProvisioningState) + ready, _ := isCSISnapshotReady(*snapshot.Properties.ProvisioningState) if sourceVolumeID == "" { sourceVolumeID = GetSourceVolumeID(snapshot) } return &csi.Snapshot{ - SizeBytes: volumehelper.GiBToBytes(int64(*snapshot.SnapshotProperties.DiskSizeGB)), + SizeBytes: volumehelper.GiBToBytes(int64(*snapshot.Properties.DiskSizeGB)), SnapshotId: *snapshot.ID, SourceVolumeId: sourceVolumeID, - CreationTime: timestamppb.New(snapshot.SnapshotProperties.TimeCreated.ToTime()), + CreationTime: timestamppb.New(*snapshot.Properties.TimeCreated), ReadyToUse: ready, }, nil } @@ -61,7 +61,7 @@ func GenerateCSISnapshot(sourceVolumeID string, snapshot *compute.Snapshot) (*cs // 2. StartingToken is null, and MaxEntries is not null. Return `MaxEntries` snapshots from zero. // 3. StartingToken is not null, and MaxEntries is null. Return all snapshots from `StartingToken`. // 4. StartingToken is not null, and MaxEntries is not null. Return `MaxEntries` snapshots from `StartingToken`. 
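The comment above lists the four StartingToken/MaxEntries combinations that GetEntriesAndNextToken must honour. The sketch below reduces that windowing logic to a plain slice, ignoring the SourceVolumeId filter and the CSI status codes used by the real function; paginate and the demo values are made-up names for illustration only.

```go
package main

import (
	"fmt"
	"strconv"
)

// paginate returns the window of items selected by startingToken/maxEntries
// plus the next token ("" when the listing is exhausted), following the four
// cases described for GetEntriesAndNextToken above.
func paginate(items []string, startingToken string, maxEntries int) ([]string, string, error) {
	start := 0
	if startingToken != "" {
		var err error
		if start, err = strconv.Atoi(startingToken); err != nil || start < 0 || start >= len(items) {
			return nil, "", fmt.Errorf("invalid starting token %q", startingToken)
		}
	}
	if maxEntries <= 0 { // 0 means "no limit" in the CSI spec
		maxEntries = len(items)
	}
	end := start + maxEntries
	if end > len(items) {
		end = len(items)
	}
	next := ""
	if end < len(items) {
		next = strconv.Itoa(end)
	}
	return items[start:end], next, nil
}

func main() {
	snaps := []string{"snap-0", "snap-1", "snap-2", "snap-3"}
	page, next, _ := paginate(snaps, "1", 2) // case 4: both token and limit set
	fmt.Println(page, next)                  // [snap-1 snap-2] 3
}
```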
-func GetEntriesAndNextToken(req *csi.ListSnapshotsRequest, snapshots []compute.Snapshot) (*csi.ListSnapshotsResponse, error) { +func GetEntriesAndNextToken(req *csi.ListSnapshotsRequest, snapshots []*armcompute.Snapshot) (*csi.ListSnapshotsResponse, error) { if req == nil { return nil, status.Errorf(codes.Aborted, "request is nil") } @@ -88,8 +88,8 @@ func GetEntriesAndNextToken(req *csi.ListSnapshotsRequest, snapshots []compute.S } entries := []*csi.ListSnapshotsResponse_Entry{} for count := 0; start < len(snapshots) && count < maxEntries; start++ { - if (req.SourceVolumeId != "" && req.SourceVolumeId == GetSourceVolumeID(&snapshots[start])) || req.SourceVolumeId == "" { - csiSnapshot, err := GenerateCSISnapshot(req.SourceVolumeId, &snapshots[start]) + if (req.SourceVolumeId != "" && req.SourceVolumeId == GetSourceVolumeID(snapshots[start])) || req.SourceVolumeId == "" { + csiSnapshot, err := GenerateCSISnapshot(req.SourceVolumeId, snapshots[start]) if err != nil { return nil, fmt.Errorf("failed to generate snapshot entry: %v", err) } @@ -119,12 +119,12 @@ func GetSnapshotNameFromURI(snapshotURI string) (string, error) { return matches[1], nil } -func GetSourceVolumeID(snapshot *compute.Snapshot) string { +func GetSourceVolumeID(snapshot *armcompute.Snapshot) string { if snapshot != nil && - snapshot.SnapshotProperties != nil && - snapshot.SnapshotProperties.CreationData != nil && - snapshot.SnapshotProperties.CreationData.SourceResourceID != nil { - return *snapshot.SnapshotProperties.CreationData.SourceResourceID + snapshot.Properties != nil && + snapshot.Properties.CreationData != nil && + snapshot.Properties.CreationData.SourceResourceID != nil { + return *snapshot.Properties.CreationData.SourceResourceID } return "" } diff --git a/pkg/azureutils/azure_snapshot_utils_test.go b/pkg/azureutils/azure_snapshot_utils_test.go index 676e2f5f6..0d8cc6814 100644 --- a/pkg/azureutils/azure_snapshot_utils_test.go +++ b/pkg/azureutils/azure_snapshot_utils_test.go @@ -20,9 +20,9 @@ import ( "fmt" "reflect" "testing" + "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/stretchr/testify/assert" "google.golang.org/grpc/codes" @@ -39,9 +39,9 @@ func TestGenerateCSISnapshot(t *testing.T) { { name: "snap shot property not exist", testFunc: func(t *testing.T) { - snapshot := compute.Snapshot{} + snapshot := &armcompute.Snapshot{} sourceVolumeID := "unit-test" - _, err := GenerateCSISnapshot(sourceVolumeID, &snapshot) + _, err := GenerateCSISnapshot(sourceVolumeID, snapshot) expectedErr := fmt.Errorf("snapshot property is nil") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) @@ -52,14 +52,14 @@ func TestGenerateCSISnapshot(t *testing.T) { name: "diskSizeGB of snapshot property is nil", testFunc: func(t *testing.T) { provisioningState := "true" - snapshot := compute.Snapshot{ - SnapshotProperties: &compute.SnapshotProperties{ - TimeCreated: &date.Time{}, + snapshot := &armcompute.Snapshot{ + Properties: &armcompute.SnapshotProperties{ + TimeCreated: &time.Time{}, ProvisioningState: &provisioningState, }, } sourceVolumeID := "unit-test" - _, err := GenerateCSISnapshot(sourceVolumeID, &snapshot) + _, err := GenerateCSISnapshot(sourceVolumeID, snapshot) expectedErr := fmt.Errorf("diskSizeGB of snapshot property is nil") if 
!reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) @@ -72,9 +72,9 @@ func TestGenerateCSISnapshot(t *testing.T) { provisioningState := "succeeded" DiskSize := int32(10) snapshotID := "test" - snapshot := compute.Snapshot{ - SnapshotProperties: &compute.SnapshotProperties{ - TimeCreated: &date.Time{}, + snapshot := armcompute.Snapshot{ + Properties: &armcompute.SnapshotProperties{ + TimeCreated: &time.Time{}, ProvisioningState: &provisioningState, DiskSizeGB: &DiskSize, }, @@ -82,10 +82,10 @@ func TestGenerateCSISnapshot(t *testing.T) { } sourceVolumeID := "unit-test" response, err := GenerateCSISnapshot(sourceVolumeID, &snapshot) - tp := timestamppb.New(snapshot.SnapshotProperties.TimeCreated.ToTime()) + tp := timestamppb.New(*snapshot.Properties.TimeCreated) ready := true expectedresponse := &csi.Snapshot{ - SizeBytes: volumehelper.GiBToBytes(int64(*snapshot.SnapshotProperties.DiskSizeGB)), + SizeBytes: volumehelper.GiBToBytes(int64(*snapshot.Properties.DiskSizeGB)), SnapshotId: *snapshot.ID, SourceVolumeId: sourceVolumeID, CreationTime: tp, @@ -104,23 +104,23 @@ func TestGenerateCSISnapshot(t *testing.T) { DiskSize := int32(10) snapshotID := "test" sourceResourceID := "unit test" - snapshot := compute.Snapshot{ - SnapshotProperties: &compute.SnapshotProperties{ - TimeCreated: &date.Time{}, + snapshot := &armcompute.Snapshot{ + Properties: &armcompute.SnapshotProperties{ + TimeCreated: &time.Time{}, ProvisioningState: &provisioningState, DiskSizeGB: &DiskSize, - CreationData: &compute.CreationData{ + CreationData: &armcompute.CreationData{ SourceResourceID: &sourceResourceID, }, }, ID: &snapshotID, } sourceVolumeID := "" - response, err := GenerateCSISnapshot(sourceVolumeID, &snapshot) - tp := timestamppb.New(snapshot.SnapshotProperties.TimeCreated.ToTime()) + response, err := GenerateCSISnapshot(sourceVolumeID, snapshot) + tp := timestamppb.New(*snapshot.Properties.TimeCreated) ready := true expectedresponse := &csi.Snapshot{ - SizeBytes: volumehelper.GiBToBytes(int64(*snapshot.SnapshotProperties.DiskSizeGB)), + SizeBytes: volumehelper.GiBToBytes(int64(*snapshot.Properties.DiskSizeGB)), SnapshotId: *snapshot.ID, SourceVolumeId: sourceResourceID, CreationTime: tp, @@ -145,32 +145,32 @@ func TestGetEntriesAndNextToken(t *testing.T) { DiskSize := int32(10) snapshotID := "test" sourceVolumeID := "unit-test" - creationdate := compute.CreationData{ + creationdate := armcompute.CreationData{ SourceResourceID: &sourceVolumeID, } - snapshot := compute.Snapshot{ - SnapshotProperties: &compute.SnapshotProperties{ - TimeCreated: &date.Time{}, + snapshot := &armcompute.Snapshot{ + Properties: &armcompute.SnapshotProperties{ + TimeCreated: &time.Time{}, ProvisioningState: &provisioningState, DiskSizeGB: &DiskSize, CreationData: &creationdate, }, ID: &snapshotID, } - snapshots := []compute.Snapshot{} + snapshots := []*armcompute.Snapshot{} snapshots = append(snapshots, snapshot) entries := []*csi.ListSnapshotsResponse_Entry{} - csiSnapshot, _ := GenerateCSISnapshot(sourceVolumeID, &snapshot) + csiSnapshot, _ := GenerateCSISnapshot(sourceVolumeID, snapshot) entries = append(entries, &csi.ListSnapshotsResponse_Entry{Snapshot: csiSnapshot}) tests := []struct { request *csi.ListSnapshotsRequest - snapshots []compute.Snapshot + snapshots []*armcompute.Snapshot expectedResponse *csi.ListSnapshotsResponse expectedError error }{ { nil, - []compute.Snapshot{}, + []*armcompute.Snapshot{}, nil, status.Errorf(codes.Aborted, "request is nil"), }, @@ -179,7 +179,7 
@@ func TestGetEntriesAndNextToken(t *testing.T) { MaxEntries: 2, StartingToken: "a", }, - []compute.Snapshot{}, + []*armcompute.Snapshot{}, nil, status.Errorf(codes.Aborted, "ListSnapshots starting token(a) parsing with error: strconv.Atoi: parsing \"a\": invalid syntax"), }, @@ -188,7 +188,7 @@ func TestGetEntriesAndNextToken(t *testing.T) { MaxEntries: 2, StartingToken: "01", }, - []compute.Snapshot{}, + []*armcompute.Snapshot{}, nil, status.Errorf(codes.Aborted, "ListSnapshots starting token(1) is greater than total number of snapshots"), }, @@ -197,7 +197,7 @@ func TestGetEntriesAndNextToken(t *testing.T) { MaxEntries: 2, StartingToken: "0", }, - []compute.Snapshot{}, + []*armcompute.Snapshot{}, nil, status.Errorf(codes.Aborted, "ListSnapshots starting token(0) is greater than total number of snapshots"), }, @@ -206,7 +206,7 @@ func TestGetEntriesAndNextToken(t *testing.T) { MaxEntries: 2, StartingToken: "-1", }, - []compute.Snapshot{}, + []*armcompute.Snapshot{}, nil, status.Errorf(codes.Aborted, "ListSnapshots starting token(-1) can not be negative"), }, @@ -215,7 +215,7 @@ func TestGetEntriesAndNextToken(t *testing.T) { MaxEntries: 2, StartingToken: "0", }, - append([]compute.Snapshot{}, compute.Snapshot{}), + append([]*armcompute.Snapshot{}, &armcompute.Snapshot{}), nil, fmt.Errorf("failed to generate snapshot entry: %v", fmt.Errorf("snapshot property is nil")), }, diff --git a/pkg/azureutils/fake_iohandler.go b/pkg/azureutils/fake_iohandler.go index d22db0ad0..1cdb0fbd4 100644 --- a/pkg/azureutils/fake_iohandler.go +++ b/pkg/azureutils/fake_iohandler.go @@ -127,11 +127,11 @@ func (handler *fakeIOHandler) ReadDir(dirname string) ([]os.DirEntry, error) { return nil, fmt.Errorf("bad dir") } -func (handler *fakeIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error { +func (handler *fakeIOHandler) WriteFile(_ string, _ []byte, _ os.FileMode) error { return nil } -func (handler *fakeIOHandler) Readlink(name string) (string, error) { +func (handler *fakeIOHandler) Readlink(_ string) (string, error) { return "/dev/azure/disk/sda", nil } diff --git a/pkg/csi-common/driver.go b/pkg/csi-common/driver.go index 4dc10172d..b2791b784 100644 --- a/pkg/csi-common/driver.go +++ b/pkg/csi-common/driver.go @@ -25,13 +25,14 @@ import ( ) type CSIDriver struct { - Name string - NodeID string - Version string - VolumeAttachLimit int64 - Cap []*csi.ControllerServiceCapability - VC []*csi.VolumeCapability_AccessMode - NSCap []*csi.NodeServiceCapability + Name string + NodeID string + Version string + VolumeAttachLimit int64 + ReservedDataDiskSlotNum int64 + Cap []*csi.ControllerServiceCapability + VC []*csi.VolumeCapability_AccessMode + NSCap []*csi.NodeServiceCapability } // Creates a NewCSIDriver object. Assumes vendor version is equal to driver version & diff --git a/pkg/csi-common/server.go b/pkg/csi-common/server.go deleted file mode 100644 index b0e426ffa..000000000 --- a/pkg/csi-common/server.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package csicommon - -import ( - "net" - "os" - "runtime" - "sync" - "time" - - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - "google.golang.org/grpc" - "k8s.io/klog/v2" - - "github.com/container-storage-interface/spec/lib/go/csi" -) - -// Defines Non blocking GRPC server interfaces -type NonBlockingGRPCServer interface { - // Start services at the endpoint - Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer, testMode, enableOtelTracing bool) - // Waits for the service to stop - Wait() - // Stops the service gracefully - Stop() - // Stops the service forcefully - ForceStop() -} - -func NewNonBlockingGRPCServer() NonBlockingGRPCServer { - return &nonBlockingGRPCServer{} -} - -// NonBlocking server -type nonBlockingGRPCServer struct { - wg sync.WaitGroup - server *grpc.Server -} - -func (s *nonBlockingGRPCServer) Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer, testMode, enableOtelTracing bool) { - s.wg.Add(1) - go s.serve(endpoint, ids, cs, ns, testMode, enableOtelTracing) -} - -func (s *nonBlockingGRPCServer) Wait() { - s.wg.Wait() -} - -func (s *nonBlockingGRPCServer) Stop() { - s.server.GracefulStop() -} - -func (s *nonBlockingGRPCServer) ForceStop() { - s.server.Stop() -} - -func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer, testMode, enableOtelTracing bool) { - proto, addr, err := ParseEndpoint(endpoint) - if err != nil { - klog.Fatal(err.Error()) - } - - if proto == "unix" { - if runtime.GOOS != "windows" { - addr = "/" + addr - } - if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { - klog.Fatalf("Failed to remove %s, error: %s", addr, err.Error()) - } - } - - listener, err := net.Listen(proto, addr) - if err != nil { - klog.Fatalf("Failed to listen: %v", err) - } - - grpcInterceptor := grpc.UnaryInterceptor(logGRPC) - if enableOtelTracing { - grpcInterceptor = grpc.ChainUnaryInterceptor(logGRPC, otelgrpc.UnaryServerInterceptor()) - } - - opts := []grpc.ServerOption{ - grpcInterceptor, - } - - server := grpc.NewServer(opts...) - s.server = server - - if ids != nil { - csi.RegisterIdentityServer(server, ids) - } - if cs != nil { - csi.RegisterControllerServer(server, cs) - } - if ns != nil { - csi.RegisterNodeServer(server, ns) - } - // Used to stop the server while running tests - if testMode { - s.wg.Done() - go func() { - // make sure Serve() is called - s.wg.Wait() - time.Sleep(time.Millisecond * 1000) - s.server.GracefulStop() - }() - } - - klog.Infof("Listening for connections on address: %#v", listener.Addr()) - if err := server.Serve(listener); err != nil { - klog.Errorf("Listening for connections on address: %#v, error: %v", listener.Addr(), err) - } -} diff --git a/pkg/csi-common/server_test.go b/pkg/csi-common/server_test.go deleted file mode 100644 index ffd417057..000000000 --- a/pkg/csi-common/server_test.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package csicommon - -import ( - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "google.golang.org/grpc" -) - -func TestNewNonBlockingGRPCServer(t *testing.T) { - s := NewNonBlockingGRPCServer() - assert.NotNil(t, s) -} - -func TestStart(t *testing.T) { - s := NewNonBlockingGRPCServer() - // sleep a while to avoid race condition in unit test - time.Sleep(time.Millisecond * 500) - s.Start("tcp://127.0.0.1:0", nil, nil, nil, true, false) - time.Sleep(time.Millisecond * 500) -} - -func TestStartWithOtelTracing(t *testing.T) { - s := NewNonBlockingGRPCServer() - // sleep a while to avoid race condition in unit test - time.Sleep(time.Millisecond * 500) - s.Start("tcp://127.0.0.1:0", nil, nil, nil, true, true) - time.Sleep(time.Millisecond * 500) -} - -func TestServe(t *testing.T) { - s := nonBlockingGRPCServer{} - s.server = grpc.NewServer() - s.wg = sync.WaitGroup{} - //need to add one here as the actual also requires one. - s.wg.Add(1) - s.serve("tcp://127.0.0.1:0", nil, nil, nil, true, false) -} - -func TestWait(t *testing.T) { - s := nonBlockingGRPCServer{} - s.server = grpc.NewServer() - s.wg = sync.WaitGroup{} - s.Wait() -} - -func TestStop(t *testing.T) { - s := nonBlockingGRPCServer{} - s.server = grpc.NewServer() - s.Stop() -} - -func TestForceStop(t *testing.T) { - s := nonBlockingGRPCServer{} - s.server = grpc.NewServer() - s.ForceStop() -} diff --git a/pkg/csi-common/utils.go b/pkg/csi-common/utils.go index 18550642a..42fa0cf3f 100644 --- a/pkg/csi-common/utils.go +++ b/pkg/csi-common/utils.go @@ -18,6 +18,9 @@ package csicommon import ( "fmt" + "net" + "os" + "runtime" "strings" "golang.org/x/net/context" @@ -38,6 +41,31 @@ func ParseEndpoint(ep string) (string, string, error) { return "", "", fmt.Errorf("Invalid endpoint: %v", ep) } +func Listen(ctx context.Context, endpoint string) (net.Listener, error) { + proto, addr, err := ParseEndpoint(endpoint) + if err != nil { + klog.Errorf(err.Error()) + return nil, err + } + + if proto == "unix" { + if runtime.GOOS != "windows" { + addr = "/" + addr + } + if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { + klog.Errorf("Failed to remove %s, error: %s", addr, err.Error()) + return nil, err + } + } + listenConfig := net.ListenConfig{} + listener, err := listenConfig.Listen(ctx, proto, addr) + if err != nil { + klog.Errorf("Failed to listen: %v", err) + return nil, err + } + return listener, nil +} + func NewVolumeCapabilityAccessMode(mode csi.VolumeCapability_AccessMode_Mode) *csi.VolumeCapability_AccessMode { return &csi.VolumeCapability_AccessMode{Mode: mode} } @@ -72,7 +100,7 @@ func getLogLevel(method string) int32 { return 2 } -func logGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { +func LogGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { level := klog.Level(getLogLevel(info.FullMethod)) klog.V(level).Infof("GRPC call: %s", info.FullMethod) klog.V(level).Infof("GRPC request: %s", protosanitizer.StripSecrets(req)) diff --git a/pkg/csi-common/utils_test.go 
b/pkg/csi-common/utils_test.go index b152cda5e..1feca4d27 100644 --- a/pkg/csi-common/utils_test.go +++ b/pkg/csi-common/utils_test.go @@ -20,6 +20,8 @@ import ( "bytes" "context" "flag" + "os" + "runtime" "testing" "github.com/container-storage-interface/spec/lib/go/csi" @@ -133,7 +135,7 @@ func TestLogGRPC(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { // EXECUTE - _, _ = logGRPC(context.Background(), test.req, &info, handler) + _, _ = LogGRPC(context.Background(), test.req, &info, handler) klog.Flush() // ASSERT @@ -232,3 +234,53 @@ func TestGetLogLevel(t *testing.T) { } } } + +func TestListen(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skip test on Windows") + } + + tests := []struct { + name string + endpoint string + filePath string + wantErr bool + }{ + { + name: "unix socket", + endpoint: "unix:///tmp/csi.sock", + filePath: "/tmp/csi.sock", + wantErr: false, + }, + { + name: "tcp socket", + endpoint: "tcp://127.0.0.1:0", + wantErr: false, + }, + { + name: "invalid endpoint", + endpoint: "invalid://", + wantErr: true, + }, + { + name: "invalid unix socket", + endpoint: "unix://does/not/exist", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := Listen(context.Background(), tt.endpoint) + if (err != nil) != tt.wantErr { + t.Errorf("Listen() error = %v, wantErr %v", err, tt.wantErr) + return + } + if err == nil { + got.Close() + if tt.filePath != "" { + os.Remove(tt.filePath) + } + } + }) + } +} diff --git a/pkg/mounter/fake_safe_mounter.go b/pkg/mounter/fake_safe_mounter.go index 0f94d01ca..4d392e790 100644 --- a/pkg/mounter/fake_safe_mounter.go +++ b/pkg/mounter/fake_safe_mounter.go @@ -48,7 +48,7 @@ func NewFakeSafeMounter() (*mount.SafeFormatAndMount, error) { } // Mount overrides mount.FakeMounter.Mount. -func (f *FakeSafeMounter) Mount(source, target, fstype string, options []string) error { +func (f *FakeSafeMounter) Mount(source, target, _ string, _ []string) error { if strings.Contains(source, "error_mount") { return fmt.Errorf("fake Mount: source error") } else if strings.Contains(target, "error_mount") { @@ -59,7 +59,7 @@ func (f *FakeSafeMounter) Mount(source, target, fstype string, options []string) } // MountSensitive overrides mount.FakeMounter.MountSensitive. -func (f *FakeSafeMounter) MountSensitive(source, target, fstype string, options, sensitiveOptions []string) error { +func (f *FakeSafeMounter) MountSensitive(source, target, _ string, _, _ []string) error { if strings.Contains(source, "error_mount_sens") { return fmt.Errorf("fake MountSensitive: source error") } else if strings.Contains(target, "error_mount_sens") { diff --git a/pkg/mounter/safe_mounter_unix.go b/pkg/mounter/safe_mounter_unix.go index f1f539fb9..22189c28c 100644 --- a/pkg/mounter/safe_mounter_unix.go +++ b/pkg/mounter/safe_mounter_unix.go @@ -24,7 +24,7 @@ import ( utilexec "k8s.io/utils/exec" ) -func NewSafeMounter(enableWindowsHostProcess, useCSIProxyGAInterface bool) (*mount.SafeFormatAndMount, error) { +func NewSafeMounter(_, _ bool) (*mount.SafeFormatAndMount, error) { return &mount.SafeFormatAndMount{ Interface: mount.New(""), Exec: utilexec.New(), diff --git a/pkg/optimization/azure_skus_map.go b/pkg/optimization/azure_skus_map.go index e138c6b9c..9a5a334b6 100644 --- a/pkg/optimization/azure_skus_map.go +++ b/pkg/optimization/azure_skus_map.go @@ -16,6 +16,7 @@ limitations under the License. 
package optimization +//nolint:dupl var ( DiskSkuMap = map[string]map[string]DiskSkuInfo{ "premium_lrs": { diff --git a/pkg/optimization/device_perf_linux.go b/pkg/optimization/device_perf_linux.go index 6366657c4..6b1957e2c 100644 --- a/pkg/optimization/device_perf_linux.go +++ b/pkg/optimization/device_perf_linux.go @@ -160,7 +160,7 @@ func echoToFile(content, filePath string) (err error) { } func getOptimalDeviceSettings(nodeInfo *NodeInfo, diskSkus map[string]map[string]DiskSkuInfo, perfProfile, accountType, diskSizeGibStr, diskIopsStr, diskBwMbpsStr string) (queueDepth, nrRequests, scheduler, maxSectorsKb, readAheadKb string, err error) { - klog.V(12).Infof("Calculating perf optimizations for rofile %s accountType %s diskSize", perfProfile, accountType, diskSizeGibStr) + klog.V(12).Infof("Calculating perf optimizations for rofile %s accountType %s diskSize %s", perfProfile, accountType, diskSizeGibStr) iopsHeadRoom := .25 maxHwSectorsKb := 512.0 diff --git a/pkg/optimization/mockoptimization/interface.go b/pkg/optimization/mockoptimization/interface.go index 0702478a4..a850479cc 100644 --- a/pkg/optimization/mockoptimization/interface.go +++ b/pkg/optimization/mockoptimization/interface.go @@ -23,7 +23,7 @@ package mockoptimization import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" optimization "sigs.k8s.io/azuredisk-csi-driver/pkg/optimization" ) diff --git a/pkg/optimization/skus_test.go b/pkg/optimization/skus_test.go index f8eb72028..b5ef900b2 100644 --- a/pkg/optimization/skus_test.go +++ b/pkg/optimization/skus_test.go @@ -31,7 +31,7 @@ type fakeCloud struct { fakecloud.Cloud } -func (fake *fakeCloud) InstanceType(ctx context.Context, nodeName types.NodeName) (string, error) { +func (fake *fakeCloud) InstanceType(_ context.Context, nodeName types.NodeName) (string, error) { if instanceType, ok := fake.InstanceTypes[nodeName]; ok { return instanceType, nil } diff --git a/pkg/os/filesystem/filesystem.go b/pkg/os/filesystem/filesystem.go index 919550b40..13fe3f237 100644 --- a/pkg/os/filesystem/filesystem.go +++ b/pkg/os/filesystem/filesystem.go @@ -80,7 +80,7 @@ func PathExists(path string) (bool, error) { return pathExists(path) } -func PathValid(ctx context.Context, path string) (bool, error) { +func PathValid(_ context.Context, path string) (bool, error) { cmd := `Test-Path $Env:remotepath` output, err := util.RunPowershellCmd(cmd, fmt.Sprintf("remotepath=%s", path)) if err != nil { diff --git a/pkg/os/volume/volume.go b/pkg/os/volume/volume.go index 4b84d2351..475588777 100644 --- a/pkg/os/volume/volume.go +++ b/pkg/os/volume/volume.go @@ -228,10 +228,13 @@ func GetDiskNumberFromVolumeID(volumeID string) (uint32, error) { // GetVolumeIDFromTargetPath - gets the volume ID given a mount point, the function is recursive until it find a volume or errors out func GetVolumeIDFromTargetPath(mount string) (string, error) { - return getTarget(mount) + return getTarget(mount, 5 /*max depth*/) } -func getTarget(mount string) (string, error) { +func getTarget(mount string, depth int) (string, error) { + if depth == 0 { + return "", fmt.Errorf("maximum depth reached on mount %s", mount) + } cmd := "(Get-Item -Path $Env:mount).Target" out, err := azureutils.RunPowershellCmd(cmd, fmt.Sprintf("mount=%s", mount)) if err != nil || len(out) == 0 { @@ -239,7 +242,7 @@ func getTarget(mount string) (string, error) { } volumeString := strings.TrimSpace(string(out)) if !strings.HasPrefix(volumeString, "Volume") { - return getTarget(volumeString) + 
return getTarget(volumeString, depth-1) } return ensureVolumePrefix(volumeString), nil diff --git a/pkg/tool/gen-disk-skus-map.go b/pkg/tool/gen-disk-skus-map.go index 7969186ed..6f6783cb7 100644 --- a/pkg/tool/gen-disk-skus-map.go +++ b/pkg/tool/gen-disk-skus-map.go @@ -26,7 +26,7 @@ import ( "strconv" "strings" - compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "k8s.io/klog/v2" "sigs.k8s.io/azuredisk-csi-driver/pkg/optimization" @@ -88,7 +88,7 @@ import ( skuMap := map[string]bool{} - var resources []compute.ResourceSku + var resources []*armcompute.ResourceSKU if err := getAllSkus(skusFullfilePath); err != nil { klog.Errorf("Could not get skus. Error: %v", err) @@ -134,16 +134,16 @@ import ( if _, ok := diskSkuInfoMap[account]; !ok { diskSkuInfoMap[account] = map[string]optimization.DiskSkuInfo{} } - diskSkuInfoMap[account][diskSize], err = getDiskCapabilities(&sku) + diskSkuInfoMap[account][diskSize], err = getDiskCapabilities(sku) if err != nil { - klog.Errorf("populateSkuMap: Failed to get disk capabilities for disk %s %s %s. Error: %v", sku.Name, sku.Size, sku.Tier, err) + klog.Errorf("populateSkuMap: Failed to get disk capabilities for disk %s %s %s. Error: %v", *sku.Name, *sku.Size, *sku.Tier, err) os.Exit(1) } } else if resType == "virtualmachines" { nodeInfo := optimization.NodeInfo{} nodeInfo.SkuName = *sku.Name - err = populateNodeCapabilities(&sku, &nodeInfo) + err = populateNodeCapabilities(sku, &nodeInfo) if err != nil { klog.Errorf("populateSkuMap: Failed to populate node capabilities. Error: %v", err) os.Exit(1) @@ -236,9 +236,9 @@ func getAllSkus(filePath string) (err error) { } // populateNodeCapabilities populates node capabilities from SkuInfo -func populateNodeCapabilities(sku *compute.ResourceSku, nodeInfo *optimization.NodeInfo) (err error) { +func populateNodeCapabilities(sku *armcompute.ResourceSKU, nodeInfo *optimization.NodeInfo) (err error) { if sku.Capabilities != nil { - for _, capability := range *sku.Capabilities { + for _, capability := range sku.Capabilities { err = nil if capability.Name != nil { switch strings.ToLower(*capability.Name) { @@ -279,14 +279,14 @@ func populateNodeCapabilities(sku *compute.ResourceSku, nodeInfo *optimization.N } // getDiskCapabilities gets disk capabilities from SkuInfo -func getDiskCapabilities(sku *compute.ResourceSku) (diskSku optimization.DiskSkuInfo, err error) { +func getDiskCapabilities(sku *armcompute.ResourceSKU) (diskSku optimization.DiskSkuInfo, err error) { diskSku = optimization.DiskSkuInfo{} diskSku.StorageAccountType = *sku.Name diskSku.StorageTier = *sku.Tier diskSku.DiskSize = *sku.Size if sku.Capabilities != nil { - for _, capability := range *sku.Capabilities { + for _, capability := range sku.Capabilities { err = nil if capability.Name != nil { switch strings.ToLower(*capability.Name) { diff --git a/pkg/util/util.go b/pkg/util/util.go index 1135885c5..f75f2afe1 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -39,6 +39,7 @@ import ( "os" "regexp" "runtime" + "sort" "strings" "sync" @@ -114,6 +115,10 @@ func ConvertTagsToMap(tags string) (map[string]string, error) { if key == "" { return nil, fmt.Errorf("Tags '%s' are invalid, the format should like: 'key1=value1,key2=value2'", tags) } + // <>%&?/. 
are not allowed in tag key + if strings.ContainsAny(key, "<>%&?/.") { + return nil, fmt.Errorf("Tag key '%s' contains invalid characters", key) + } value := strings.TrimSpace(kv[1]) m[key] = value } @@ -169,6 +174,30 @@ func (vl *VolumeLocks) Release(volumeID string) { vl.locks.Delete(volumeID) } +func GetElementsInArray1NotInArray2(arr1 []int, arr2 []int) []int { + sort.Ints(arr1) + sort.Ints(arr2) + + i, j := 0, 0 + result := []int{} + for i < len(arr1) && j < len(arr2) { + if arr1[i] < arr2[j] { + result = append(result, arr1[i]) + i++ + } else if arr1[i] > arr2[j] { + j++ + } else { + i++ + j++ + } + } + for i < len(arr1) { + result = append(result, arr1[i]) + i++ + } + return result +} + // GetVolumeName parses an Azure disk URI and returns its name func GetVolumeName(diskURI string) (string, error) { r := regexp.MustCompile(`Microsoft\.Compute\/disks\/(\S+)`) diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index 8ec6f488a..259553682 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -127,6 +127,18 @@ func TestConvertTagsToMap(t *testing.T) { expectedOutput: nil, expectedError: true, }, + { + desc: "should return error for invalid characters in key", + tags: "key/1=value1,%&?/.", + expectedOutput: nil, + expectedError: true, + }, + { + desc: "should not return error for invalid characters in value", + tags: "key1=value1,key2=<>%&?/.", + expectedOutput: map[string]string{"key1": "value1", "key2": "<>%&?/."}, + expectedError: false, + }, } for i, c := range testCases { @@ -283,6 +295,64 @@ func TestVolumeLock(t *testing.T) { } } +func TestGetElementsInArray1NotInArray2(t *testing.T) { + testCases := []struct { + desc string + array1 []int + array2 []int + expectedOutput []int + }{ + { + desc: "should return empty array when both arrays are empty", + array1: []int{}, + array2: []int{}, + expectedOutput: []int{}, + }, + { + desc: "should return empty array when array1 is empty", + array1: []int{}, + array2: []int{1, 2, 3}, + expectedOutput: []int{}, + }, + { + desc: "should return array1 when array2 is empty", + array1: []int{1, 2, 3}, + array2: []int{}, + expectedOutput: []int{1, 2, 3}, + }, + { + desc: "should return empty array when array1 is subset of array2", + array1: []int{1, 2}, + array2: []int{1, 2, 3}, + expectedOutput: []int{}, + }, + { + desc: "should return array1 when array2 is subset of array1", + array1: []int{1, 2, 3}, + array2: []int{1, 2}, + expectedOutput: []int{3}, + }, + { + desc: "should return array1 when array2 is equal to array1", + array1: []int{1, 2, 3}, + array2: []int{1, 2, 3}, + expectedOutput: []int{}, + }, + { + desc: "should return array1 when array2 is not equal to array1", + array1: []int{1, 2, 3}, + array2: []int{1, 2, 4}, + expectedOutput: []int{3}, + }, + } + for _, testCase := range testCases { + output := GetElementsInArray1NotInArray2(testCase.array1, testCase.array2) + if !reflect.DeepEqual(output, testCase.expectedOutput) { + t.Errorf("got: %v, expected: %v, desc: %v", output, testCase.expectedOutput, testCase.desc) + } + } +} + func TestGetVolumeName(t *testing.T) { testCases := map[string]struct { diskName string diff --git a/test/e2e/dynamic_provisioning_test.go b/test/e2e/dynamic_provisioning_test.go index b49ff4a15..faf9c2f39 100644 --- a/test/e2e/dynamic_provisioning_test.go +++ b/test/e2e/dynamic_provisioning_test.go @@ -133,7 +133,7 @@ func (t *dynamicProvisioningTestSuite) defineTests(isMultiZone bool) { }, } - if isMultiZone && !isUsingInTreeVolumePlugin { + if isMultiZone && !isUsingInTreeVolumePlugin && !isCapzTest { test.StorageClassParameters = map[string]string{ "skuName": "UltraSSD_LRS", "cachingmode": "None", @@ -154,7 +154,7 @@ func (t
*dynamicProvisioningTestSuite) defineTests(isMultiZone bool) { pods := []testsuites.PodDetails{ { Cmd: convertToPowershellorCmdCommandIfNecessary("echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data"), - Volumes: []testsuites.VolumeDetails{ + Volumes: t.normalizeVolumes([]testsuites.VolumeDetails{ { ClaimSize: "10Gi", VolumeMount: testsuites.VolumeMountDetails{ @@ -163,7 +163,7 @@ func (t *dynamicProvisioningTestSuite) defineTests(isMultiZone bool) { }, VolumeAccessMode: v1.ReadWriteOnce, }, - }, + }, isMultiZone), IsWindows: isWindowsCluster, WinServerVer: winServerVer, }, @@ -547,7 +547,7 @@ func (t *dynamicProvisioningTestSuite) defineTests(isMultiZone bool) { Cmd: convertToPowershellorCmdCommandIfNecessary("echo 'hello world' > /mnt/test-1/data"), Volumes: t.normalizeVolumes([]testsuites.VolumeDetails{ { - FSType: "ext4", + FSType: "xfs", ClaimSize: "10Gi", VolumeMount: testsuites.VolumeMountDetails{ NameGenerate: "test-volume-", @@ -585,7 +585,7 @@ func (t *dynamicProvisioningTestSuite) defineTests(isMultiZone bool) { pod := testsuites.PodDetails{ Volumes: t.normalizeVolumes([]testsuites.VolumeDetails{ { - FSType: "ext4", + FSType: "xfs", ClaimSize: "10Gi", VolumeMount: testsuites.VolumeMountDetails{ NameGenerate: "test-volume-", @@ -600,7 +600,7 @@ func (t *dynamicProvisioningTestSuite) defineTests(isMultiZone bool) { clonedVolumeSize := "20Gi" podWithClonedVolume := testsuites.PodDetails{ - Cmd: convertToPowershellorCmdCommandIfNecessary("df -h | grep /mnt/test- | awk '{print $2}' | grep 20.0G"), + Cmd: convertToPowershellorCmdCommandIfNecessary("df -h | grep /mnt/test- | awk '{print $2}' | grep -E '19|20'"), IsWindows: isWindowsCluster, WinServerVer: winServerVer, } @@ -747,6 +747,7 @@ func (t *dynamicProvisioningTestSuite) defineTests(isMultiZone bool) { CSIDriver: testDriver, Pod: pod, ShouldOverwrite: false, + IsWindowsHPCDeployment: isWindowsHPCDeployment, PodWithSnapshot: podWithSnapshot, StorageClassParameters: map[string]string{"skuName": "StandardSSD_LRS"}, SnapshotStorageClassParameters: map[string]string{ @@ -812,6 +813,7 @@ func (t *dynamicProvisioningTestSuite) defineTests(isMultiZone bool) { test.Run(ctx, cs, snapshotrcs, ns) }) + //nolint:dupl ginkgo.It("should create a pod with small storage size, take a volume snapshot cross region, and restore disk in another region [disk.csi.azure.com]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() skipIfTestingInWindowsCluster() @@ -855,6 +857,7 @@ func (t *dynamicProvisioningTestSuite) defineTests(isMultiZone bool) { test.Run(ctx, cs, snapshotrcs, ns) }) + //nolint:dupl ginkgo.It("should create a pod with large storage size, take a volume snapshot cross region, and restore disk in another region [disk.csi.azure.com]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() skipIfTestingInWindowsCluster() @@ -973,7 +976,7 @@ func (t *dynamicProvisioningTestSuite) defineTests(isMultiZone bool) { ginkgo.It("should create a volume on demand and dynamically resize it without detaching [disk.csi.azure.com] ", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() - skipIfNotDynamicallyResizeSuported() + //Subscription must be registered for LiveResize volume := testsuites.VolumeDetails{ ClaimSize: "10Gi", @@ -1013,7 +1016,7 @@ func (t *dynamicProvisioningTestSuite) defineTests(isMultiZone bool) { ginkgo.It("should create a block volume on demand and dynamically resize it without detaching [disk.csi.azure.com] ", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() - 
skipIfNotDynamicallyResizeSuported() + //Subscription must be registered for LiveResize volume := testsuites.VolumeDetails{ ClaimSize: "10Gi", @@ -1269,6 +1272,9 @@ func (t *dynamicProvisioningTestSuite) defineTests(isMultiZone bool) { skipIfTestingInWindowsCluster() if isMultiZone { skipIfNotZRSSupported() + if isCapzTest { + ginkgo.Skip("skip shared disk multi zone test on capz cluster") + } } pod := testsuites.PodDetails{ diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go index f92d8ee78..80afe613a 100644 --- a/test/e2e/suite_test.go +++ b/test/e2e/suite_test.go @@ -17,6 +17,7 @@ limitations under the License. package e2e import ( + "context" "flag" "fmt" "log" @@ -30,7 +31,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/gomega" - "github.com/pborman/uuid" + "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/config" consts "sigs.k8s.io/azuredisk-csi-driver/pkg/azureconstants" @@ -59,9 +60,9 @@ var ( winServerVer = os.Getenv(testWinServerVerEnvVar) isAzureStackCloud = strings.EqualFold(os.Getenv(cloudNameEnvVar), "AZURESTACKCLOUD") isWindowsHPCDeployment = strings.EqualFold(os.Getenv("WINDOWS_USE_HOST_PROCESS_CONTAINERS"), "true") + isCapzTest = os.Getenv("NODE_MACHINE_TYPE") != "" location string supportsZRS bool - supportsDynamicResize bool ) type testCmd struct { @@ -83,8 +84,6 @@ var _ = ginkgo.BeforeSuite(func(ctx ginkgo.SpecContext) { kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config") os.Setenv(kubeconfigEnvVar, kubeconfig) } - handleFlags() - framework.AfterReadingAllFlags(&framework.TestContext) // Default storage driver configuration is CSI. Freshly built // CSI driver is installed for that case. @@ -97,36 +96,36 @@ var _ = ginkgo.BeforeSuite(func(ctx ginkgo.SpecContext) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) location = creds.Location - - if location == "westus2" || location == "westeurope" || location == "northeurope" || location == "francecentral" { - supportsZRS = true - } - - dynamicResizeZones := []string{ - "westcentralus", - "francesouth", - "westindia", - "norwaywest", - "eastasia", - "francecentral", - "germanywestcentral", - "japanwest", + supportZRSRegions := []string{ "southafricanorth", - "jioindiawest", - "canadacentral", - "australiacentral", - "japaneast", + "eastasia", + "southeastasia", + "australiaeast", + "brazilsouth", + "westeurope", "northeurope", + "francecentral", "centralindia", - "uaecentral", - "switzerlandwest", - "brazilsouth", - "uksouth"} - - supportsDynamicResize = false - for _, zone := range dynamicResizeZones { - if location == zone { - supportsDynamicResize = true + "italynorth", + "japaneast", + "koreacentral", + "norwayeast", + "polandcentral", + "qatarcentral", + "swedencentral", + "switzerlandnorth", + "uaenorth", + "uksouth", + "eastus", + "eastus2", + "southcentralus", + "westus2", + "westus3", + } + supportsZRS = false + for _, region := range supportZRSRegions { + if location == region { + supportsZRS = true break } } @@ -152,12 +151,15 @@ var _ = ginkgo.BeforeSuite(func(ctx ginkgo.SpecContext) { DriverName: consts.DefaultDriverName, VolumeAttachLimit: 16, EnablePerfOptimization: false, + Kubeconfig: os.Getenv(kubeconfigEnvVar), + Endpoint: fmt.Sprintf("unix:///tmp/csi-%s.sock", string(uuid.NewUUID())), } + os.Setenv("AZURE_CREDENTIAL_FILE", credentials.TempAzureCredentialFilePath) azurediskDriver = azuredisk.NewDriver(&driverOptions) - kubeconfig := os.Getenv(kubeconfigEnvVar) + go func() 
{ - os.Setenv("AZURE_CREDENTIAL_FILE", credentials.TempAzureCredentialFilePath) - azurediskDriver.Run(fmt.Sprintf("unix:///tmp/csi-%s.sock", uuid.NewUUID().String()), kubeconfig, false, false) + err := azurediskDriver.Run(context.Background()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() } }) @@ -253,7 +255,6 @@ var _ = ginkgo.AfterSuite(func(ctx ginkgo.SpecContext) { } execTestCmd([]testCmd{uninstallDriver}) } - err := credentials.DeleteAzureCredentialFile() gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -317,12 +318,6 @@ func skipIfNotZRSSupported() { } } -func skipIfNotDynamicallyResizeSuported() { - if !supportsDynamicResize { - ginkgo.Skip("test case not supported on regions without dynamic resize support") - } -} - func convertToPowershellorCmdCommandIfNecessary(command string) string { if !isWindowsCluster { return command @@ -347,19 +342,21 @@ func convertToPowershellorCmdCommandIfNecessary(command string) string { return "echo 'overwrite' | Out-File -FilePath C:\\mnt\\test-1\\data.txt; Start-Sleep 3600" case "grep 'hello world' /mnt/test-1/data": return "Get-Content C:\\mnt\\test-1\\data.txt | findstr 'hello world'" - case "df -h | grep /mnt/test- | awk '{print $2}' | grep 20.0G": - return "fsutil volume diskfree C:\\mnt\\ | Select-String 'Total bytes' | Select-String '19.9 GB'" + case "df -h | grep /mnt/test- | awk '{print $2}' | grep -E '19|20'": + return "fsutil volume diskfree C:\\mnt\\ | Select-String 'Total bytes' | Select-String -Pattern '19|20'" } return command } // handleFlags sets up all flags and parses the command line. -func handleFlags() { +func TestMain(m *testing.M) { config.CopyFlags(config.Flags, flag.CommandLine) framework.RegisterCommonFlags(flag.CommandLine) framework.RegisterClusterFlags(flag.CommandLine) + framework.AfterReadingAllFlags(&framework.TestContext) flag.Parse() + os.Exit(m.Run()) } func getFSType(IsWindowsCluster bool) string { diff --git a/test/e2e/testsuites/dynamically_provisioned_azuredisk_detach_tester.go b/test/e2e/testsuites/dynamically_provisioned_azuredisk_detach_tester.go index 19330c3da..47543cb2f 100644 --- a/test/e2e/testsuites/dynamically_provisioned_azuredisk_detach_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_azuredisk_detach_tester.go @@ -21,7 +21,8 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + compute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" @@ -86,7 +87,7 @@ func (t *DynamicallyProvisionedAzureDiskDetach) Run(ctx context.Context, client framework.ExpectNoError(err, fmt.Sprintf("Error getting client for azuredisk %v", err)) disktest, err := disksClient.Get(ctx, resourceGroup, diskName) framework.ExpectNoError(err, fmt.Sprintf("Error getting disk for azuredisk %v", err)) - framework.ExpectEqual(compute.Attached, disktest.DiskState) + gomega.Expect(compute.DiskStateAttached).To(gomega.Equal(*disktest.Properties.DiskState)) ginkgo.By("begin to delete the pod") tpod.Cleanup(ctx) @@ -96,10 +97,10 @@ func (t *DynamicallyProvisionedAzureDiskDetach) Run(ctx context.Context, client if err != nil { return false, fmt.Errorf("Error getting disk for azuredisk %v", err) } - if disktest.DiskState == compute.Unattached { + if *disktest.Properties.DiskState == armcompute.DiskStateUnattached { return true, nil } - ginkgo.By(fmt.Sprintf("current disk state(%v) is not in unattached state, 
wait and recheck", disktest.DiskState)) + ginkgo.By(fmt.Sprintf("current disk state(%v) is not in unattached state, wait and recheck", *disktest.Properties.DiskState)) return false, nil }) framework.ExpectNoError(err, fmt.Sprintf("waiting for disk detach complete returned with error: %v", err)) diff --git a/test/e2e/testsuites/dynamically_provisioned_azuredisk_tag_tester.go b/test/e2e/testsuites/dynamically_provisioned_azuredisk_tag_tester.go index 4eabf1838..922ec7896 100644 --- a/test/e2e/testsuites/dynamically_provisioned_azuredisk_tag_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_azuredisk_tag_tester.go @@ -90,28 +90,28 @@ func (t *DynamicallyProvisionedAzureDiskWithTag) Run(ctx context.Context, client for k, v := range test { _, ok := disktest.Tags[k] - framework.ExpectEqual(ok, true) + gomega.Expect(ok).To(gomega.BeTrue(), fmt.Sprintf("Tag %s not found", k)) if ok { - framework.ExpectEqual(*disktest.Tags[k], v) + gomega.Expect(*disktest.Tags[k]).To(gomega.Equal(v)) } } tag, ok := disktest.Tags["kubernetes.io-created-for-pv-name"] - framework.ExpectEqual(ok, true) - framework.ExpectEqual(tag != nil, true) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(tag != nil).To(gomega.BeTrue()) if tag != nil { ginkgo.By(fmt.Sprintf("kubernetes.io-created-for-pv-name: %s", *tag)) } tag, ok = disktest.Tags["kubernetes.io-created-for-pvc-name"] - framework.ExpectEqual(ok, true) - framework.ExpectEqual(tag != nil, true) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(tag != nil).To(gomega.BeTrue()) if tag != nil { ginkgo.By(fmt.Sprintf("kubernetes.io-created-for-pvc-name: %s", *tag)) } tag, ok = disktest.Tags["kubernetes.io-created-for-pvc-namespace"] - framework.ExpectEqual(ok, true) - framework.ExpectEqual(tag != nil, true) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(tag != nil).To(gomega.BeTrue()) if tag != nil { ginkgo.By(fmt.Sprintf("kubernetes.io-created-for-pvc-namespace: %s", *tag)) } diff --git a/test/e2e/testsuites/dynamically_provisioned_delete_pod_tester.go b/test/e2e/testsuites/dynamically_provisioned_delete_pod_tester.go index e9db6f093..c4686958e 100644 --- a/test/e2e/testsuites/dynamically_provisioned_delete_pod_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_delete_pod_tester.go @@ -40,6 +40,7 @@ type PodExecCheck struct { ExpectedString string } +//nolint:dupl func (t *DynamicallyProvisionedDeletePodTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { tDeployment, cleanup := t.Pod.SetupDeployment(ctx, client, namespace, t.CSIDriver, driver.GetParameters()) // defer must be called here for resources not get removed before using them diff --git a/test/e2e/testsuites/dynamically_provisioned_external_rg_volume_tester.go b/test/e2e/testsuites/dynamically_provisioned_external_rg_volume_tester.go index 5cc86cdad..1325938cf 100644 --- a/test/e2e/testsuites/dynamically_provisioned_external_rg_volume_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_external_rg_volume_tester.go @@ -27,7 +27,7 @@ import ( "sigs.k8s.io/azuredisk-csi-driver/test/utils/credentials" "github.com/onsi/ginkgo/v2" - "github.com/pborman/uuid" + "k8s.io/apimachinery/pkg/util/uuid" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" @@ -79,7 +79,7 @@ func (t *DynamicallyProvisionedExternalRgVolumeTest) Run(ctx context.Context, cl if t.SeparateResourceGroups || externalRG == "" { //create external resource group - externalRG = credentials.ResourceGroupPrefix + uuid.NewUUID().String() + externalRG = 
credentials.ResourceGroupPrefix + string(uuid.NewUUID()) ginkgo.By("Creating external resource group: " + externalRG) _, err = azureClient.EnsureResourceGroup(ctx, externalRG, creds.Location, nil) framework.ExpectNoError(err) diff --git a/test/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go b/test/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go index 579271ac8..3a447727c 100644 --- a/test/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +//nolint:dupl package testsuites import ( diff --git a/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go b/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go index 36a99c434..420ae39fc 100644 --- a/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go @@ -157,7 +157,7 @@ func (t *DynamicallyProvisionedResizeVolumeTest) Run(ctx context.Context, client framework.ExpectNoError(err, fmt.Sprintf("Error getting client for azuredisk %v", err)) disktest, err := disksClient.Get(ctx, resourceGroup, diskName) framework.ExpectNoError(err, fmt.Sprintf("Error getting disk for azuredisk %v", err)) - newdiskSize := strconv.Itoa(int(*disktest.DiskSizeGB)) + "Gi" + newdiskSize := strconv.Itoa(int(*disktest.Properties.DiskSizeGB)) + "Gi" if !(newSize.String() == newdiskSize) { framework.Failf("newPVCSize(%+v) is not equal to new azurediskSize(%+v)", newSize.String(), newdiskSize) } diff --git a/test/e2e/testsuites/dynamically_provisioned_shared_disk_tester.go b/test/e2e/testsuites/dynamically_provisioned_shared_disk_tester.go index d36737e41..fe0deb5a6 100644 --- a/test/e2e/testsuites/dynamically_provisioned_shared_disk_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_shared_disk_tester.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ +//nolint:dupl package testsuites import ( diff --git a/test/e2e/testsuites/dynamically_provisioned_statefulset_e2e_tester.go b/test/e2e/testsuites/dynamically_provisioned_statefulset_e2e_tester.go index 7b97b0cd9..438a91c30 100644 --- a/test/e2e/testsuites/dynamically_provisioned_statefulset_e2e_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_statefulset_e2e_tester.go @@ -34,6 +34,7 @@ type DynamicallyProvisionedStatefulSetTest struct { PodCheck *PodExecCheck } +//nolint:dupl func (t *DynamicallyProvisionedStatefulSetTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { tStatefulSet, cleanup := t.Pod.SetupStatefulset(ctx, client, namespace, t.CSIDriver, driver.GetParameters()) // defer must be called here for resources not get removed before using them diff --git a/test/e2e/testsuites/dynamically_provisioned_volume_cloning_tester.go b/test/e2e/testsuites/dynamically_provisioned_volume_cloning_tester.go index 19a3457e1..40e230a51 100644 --- a/test/e2e/testsuites/dynamically_provisioned_volume_cloning_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_volume_cloning_tester.go @@ -69,20 +69,12 @@ func (t *DynamicallyProvisionedVolumeCloningTest) Run(ctx context.Context, clien clonedVolume.ClaimSize = t.ClonedVolumeSize } - zone := tpod.GetZoneForVolume(ctx, 0) - t.PodWithClonedVolume.Volumes = []VolumeDetails{clonedVolume} tpod, cleanups = t.PodWithClonedVolume.SetupWithDynamicVolumes(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters) for i := range cleanups { defer cleanups[i](ctx) } - // Since an LRS disk cannot be cloned to a different zone, add a selector to the pod so - // that it is created in the same zone as the source disk. - if len(zone) != 0 { - tpod.SetNodeSelector(map[string]string{"topology.disk.csi.azure.com/zone": zone}) - } - ginkgo.By("deploying a second pod with cloned volume") tpod.Create(ctx) defer tpod.Cleanup(ctx) diff --git a/test/e2e/testsuites/dynamically_provisioned_volume_snapshot_cross_region_tester.go b/test/e2e/testsuites/dynamically_provisioned_volume_snapshot_cross_region_tester.go index 08decce5a..bc6097a80 100644 --- a/test/e2e/testsuites/dynamically_provisioned_volume_snapshot_cross_region_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_volume_snapshot_cross_region_tester.go @@ -26,7 +26,7 @@ import ( "sigs.k8s.io/azuredisk-csi-driver/test/utils/credentials" "github.com/onsi/ginkgo/v2" - "github.com/pborman/uuid" + "k8s.io/apimachinery/pkg/util/uuid" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -78,7 +78,7 @@ func (t *DynamicallyProvisionedVolumeSnapshotCrossRegionTest) Run(ctx context.Co framework.ExpectNoError(err) //create external resource group - externalRG := credentials.ResourceGroupPrefix + uuid.NewUUID().String() + externalRG := credentials.ResourceGroupPrefix + string(uuid.NewUUID()) ginkgo.By("Creating external resource group: " + externalRG) _, err = azureClient.EnsureResourceGroup(ctx, externalRG, creds.Location, nil) framework.ExpectNoError(err) diff --git a/test/e2e/testsuites/dynamically_provisioned_volume_snapshot_tester.go b/test/e2e/testsuites/dynamically_provisioned_volume_snapshot_tester.go index 78f5dfd58..a0dc0eef5 100644 --- a/test/e2e/testsuites/dynamically_provisioned_volume_snapshot_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_volume_snapshot_tester.go @@ -27,7 +27,7 @@ import ( "sigs.k8s.io/azuredisk-csi-driver/test/utils/credentials" "github.com/onsi/ginkgo/v2" - "github.com/pborman/uuid" + 
"k8s.io/apimachinery/pkg/util/uuid" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" @@ -49,6 +49,7 @@ type DynamicallyProvisionedVolumeSnapshotTest struct { PodWithSnapshot PodDetails StorageClassParameters map[string]string SnapshotStorageClassParameters map[string]string + IsWindowsHPCDeployment bool } func (t *DynamicallyProvisionedVolumeSnapshotTest) Run(ctx context.Context, client clientset.Interface, restclient restclientset.Interface, namespace *v1.Namespace) { @@ -81,7 +82,7 @@ func (t *DynamicallyProvisionedVolumeSnapshotTest) Run(ctx context.Context, clie framework.ExpectNoError(err) //create external resource group - externalRG := credentials.ResourceGroupPrefix + uuid.NewUUID().String() + externalRG := credentials.ResourceGroupPrefix + string(uuid.NewUUID()) ginkgo.By("Creating external resource group: " + externalRG) _, err = azureClient.EnsureResourceGroup(ctx, externalRG, creds.Location, nil) framework.ExpectNoError(err) @@ -132,7 +133,7 @@ func (t *DynamicallyProvisionedVolumeSnapshotTest) Run(ctx context.Context, clie defer tPodWithSnapshotCleanup[i](ctx) } - if t.ShouldOverwrite { + if t.ShouldOverwrite && !t.IsWindowsHPCDeployment { // TODO: add test case which will schedule the original disk and the copied disk on the same node once the conflicting UUID issue is fixed. ginkgo.By("Set pod anti-affinity to make sure two pods are scheduled on different nodes") tPodWithSnapshot.SetAffinity(&TestPodAntiAffinity) diff --git a/test/e2e/testsuites/pre_provisioned_inline_volume_tester.go b/test/e2e/testsuites/pre_provisioned_inline_volume_tester.go index 21d87cda1..3cf016351 100644 --- a/test/e2e/testsuites/pre_provisioned_inline_volume_tester.go +++ b/test/e2e/testsuites/pre_provisioned_inline_volume_tester.go @@ -38,7 +38,7 @@ type PreProvisionedInlineVolumeTest struct { func (t *PreProvisionedInlineVolumeTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { for _, pod := range t.Pods { - tpod, cleanup := pod.SetupWithInlineVolumes(client, namespace, t.CSIDriver, t.DiskURI, t.ReadOnly) + tpod, cleanup := pod.SetupWithInlineVolumes(client, namespace, t.DiskURI, t.ReadOnly) // defer must be called here for resources not get removed before using them for i := range cleanup { defer cleanup[i](ctx) diff --git a/test/e2e/testsuites/pre_provisioned_read_only_volume_tester.go b/test/e2e/testsuites/pre_provisioned_read_only_volume_tester.go index f51c7ff37..bc40afeef 100644 --- a/test/e2e/testsuites/pre_provisioned_read_only_volume_tester.go +++ b/test/e2e/testsuites/pre_provisioned_read_only_volume_tester.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +//nolint:dupl package testsuites import ( diff --git a/test/e2e/testsuites/pre_provisioned_shared_disk_tester.go b/test/e2e/testsuites/pre_provisioned_shared_disk_tester.go index 43fb196d7..b454baf57 100644 --- a/test/e2e/testsuites/pre_provisioned_shared_disk_tester.go +++ b/test/e2e/testsuites/pre_provisioned_shared_disk_tester.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ +//nolint:dupl package testsuites import ( diff --git a/test/e2e/testsuites/specs.go b/test/e2e/testsuites/specs.go index b5df077d2..8d1497e1e 100644 --- a/test/e2e/testsuites/specs.go +++ b/test/e2e/testsuites/specs.go @@ -148,7 +148,7 @@ func (pod *PodDetails) SetupWithDynamicVolumesWithSubpath(ctx context.Context, c return tpod, cleanupFuncs } -func (pod *PodDetails) SetupWithInlineVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.PreProvisionedVolumeTestDriver, diskURI string, readOnly bool) (*TestPod, []func(context.Context)) { +func (pod *PodDetails) SetupWithInlineVolumes(client clientset.Interface, namespace *v1.Namespace, diskURI string, readOnly bool) (*TestPod, []func(context.Context)) { tpod := NewTestPod(client, namespace, pod.Cmd, pod.IsWindows, pod.WinServerVer) cleanupFuncs := make([]func(context.Context), 0) for n, v := range pod.Volumes { diff --git a/test/e2e/testsuites/testsuites.go b/test/e2e/testsuites/testsuites.go index 0ec755d2f..40a7b811d 100644 --- a/test/e2e/testsuites/testsuites.go +++ b/test/e2e/testsuites/testsuites.go @@ -336,7 +336,7 @@ func generatePVC(namespace, storageClassName, name, claimSize string, volumeMode AccessModes: []v1.PersistentVolumeAccessMode{ accessMode, }, - Resources: v1.ResourceRequirements{ + Resources: v1.VolumeResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize), }, diff --git a/test/external-e2e/manifest/testdriver.yaml b/test/external-e2e/manifest/testdriver.yaml index 09abaa8ea..53b3222d5 100644 --- a/test/external-e2e/manifest/testdriver.yaml +++ b/test/external-e2e/manifest/testdriver.yaml @@ -19,5 +19,8 @@ DriverInfo: volumeLimits: true snapshotDataSource: true pvcDataSource: true + singleNodeVolume: true + multiplePVsSameID: true + readWriteOncePod: true SupportedSizeRange: Min: 1Gi diff --git a/test/external-e2e/run.sh b/test/external-e2e/run.sh index 1be09eb2e..99386d140 100755 --- a/test/external-e2e/run.sh +++ b/test/external-e2e/run.sh @@ -22,14 +22,14 @@ PROJECT_ROOT=$(git rev-parse --show-toplevel) DRIVER="test" install_ginkgo () { - go install github.com/onsi/ginkgo/ginkgo@v1.14.0 + go install github.com/onsi/ginkgo/v2/ginkgo@v2.13.2 } setup_e2e_binaries() { mkdir /tmp/csi-azuredisk # download k8s external e2e binary for kubernetes - curl -sL https://dl.k8s.io/release/v1.24.0/kubernetes-test-linux-amd64.tar.gz --output e2e-tests.tar.gz + curl -sL https://dl.k8s.io/release/v1.27.0/kubernetes-test-linux-amd64.tar.gz --output e2e-tests.tar.gz tar -xvf e2e-tests.tar.gz && rm e2e-tests.tar.gz # test on alternative driver name diff --git a/test/integration/README.md b/test/integration/README.md deleted file mode 100644 index e5edf3883..000000000 --- a/test/integration/README.md +++ /dev/null @@ -1,27 +0,0 @@ -## Integration Test -Integration test verifies the functionality of CSI driver as a standalone server outside Kubernetes. It exercises the lifecycle of the volume by creating, attaching, staging, mounting volumes and the reverse operations. 
- -## Run Integration Tests Locally -### Prerequisite - - make sure `GOPATH` is set and [csc](https://github.com/rexray/gocsi/tree/master/csc) tool is installed under `$GOPATH/bin/csc` -``` -export set GOPATH=/root/go -go get github.com/rexray/gocsi/csc -``` - - - set Azure credentials by environment variables - > you could get these variables from `/etc/kubernetes/azure.json` on a kubernetes cluster node -``` -export set tenantId= -export set subscriptionId= -export set aadClientId= -export set aadClientSecret= -export set resourceGroup= -export set location= -export set nodeid= -``` - -### Run integration tests -``` -make test-integration -``` diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go deleted file mode 100644 index 85a331966..000000000 --- a/test/integration/integration_test.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package integration - -import ( - "context" - "flag" - "log" - "os" - "os/exec" - "strings" - "testing" - - "sigs.k8s.io/azuredisk-csi-driver/test/utils/azure" - "sigs.k8s.io/azuredisk-csi-driver/test/utils/credentials" - - "github.com/stretchr/testify/assert" -) - -const ( - nodeid = "integration-test-node" -) - -var useDriverV2 = flag.Bool("temp-use-driver-v2", false, "A temporary flag to enable early test and development of Azure Disk CSI Driver V2. 
This will be removed in the future.") - -func TestIntegrationOnAzurePublicCloud(t *testing.T) { - // Test on AzurePublicCloud - t.Setenv("AZURE_VM_TYPE", "standard") - creds, err := credentials.CreateAzureCredentialFile() - defer func() { - err := credentials.DeleteAzureCredentialFile() - assert.NoError(t, err) - }() - assert.NoError(t, err) - assert.NotNil(t, creds) - - // Set necessary env vars for sanity test - t.Setenv("AZURE_CREDENTIAL_FILE", credentials.TempAzureCredentialFilePath) - t.Setenv("nodeid", nodeid) - - azureClient, err := azure.GetAzureClient(creds.Cloud, creds.SubscriptionID, creds.AADClientID, creds.TenantID, creds.AADClientSecret) - assert.NoError(t, err) - - ctx := context.Background() - // Create an empty resource group for integration test - log.Printf("Creating resource group %s in %s", creds.ResourceGroup, creds.Cloud) - _, err = azureClient.EnsureResourceGroup(ctx, creds.ResourceGroup, creds.Location, nil) - assert.NoError(t, err) - defer func() { - // Only delete resource group the test created - if strings.HasPrefix(creds.ResourceGroup, credentials.ResourceGroupPrefix) { - log.Printf("Deleting resource group %s", creds.ResourceGroup) - err := azureClient.DeleteResourceGroup(ctx, creds.ResourceGroup) - assert.NoError(t, err) - } - }() - - log.Printf("Creating a VM in %s", creds.ResourceGroup) - _, err = azureClient.EnsureVirtualMachine(ctx, creds.ResourceGroup, creds.Location, nodeid) - assert.NoError(t, err) - - // Execute the script from project root - err = os.Chdir("../..") - assert.NoError(t, err) - // Change directory back to test/integration in preparation for next test - defer func() { - err := os.Chdir("test/integration") - assert.NoError(t, err) - }() - - cwd, err := os.Getwd() - assert.NoError(t, err) - assert.True(t, strings.HasSuffix(cwd, "azuredisk-csi-driver")) - - args := []string{creds.Cloud} - if *useDriverV2 { - args = append(args, "v2") - } - - cmd := exec.Command("./test/integration/run-tests-all-clouds.sh", args...) - cmd.Dir = cwd - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - t.Fatalf("Integration test failed %v", err) - } -} diff --git a/test/integration/run-test.sh b/test/integration/run-test.sh deleted file mode 100755 index 9eed0d17b..000000000 --- a/test/integration/run-test.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -euo pipefail - -function cleanup { - echo 'pkill -f azurediskplugin' - pkill -f azurediskplugin -} - -t="$(date +%s)" -readonly CSC_BIN="$GOBIN/csc" -readonly volname="citest-$t" - -endpoint='tcp://127.0.0.1:10000' -if [[ "$#" -gt 0 ]]; then - endpoint="$1" -fi - -node='CSINode' -if [ $# -gt 1 ]; then - node="$2" -fi - -cloud='AzurePublicCloud' -if [[ "$#" -gt 2 ]]; then - cloud="$3" -fi - -echo "Begin to run integration test on $cloud..." 
-
-ARCH=$(uname -p)
-if [[ "${ARCH}" == "x86_64" || ${ARCH} == "unknown" ]]; then
-  ARCH="amd64"
-fi
-
-# Run CSI driver as a background service
-if [[ $# -lt 4 || "$4" != "v2" ]]; then
-  _output/${ARCH}/azurediskplugin --endpoint "$endpoint" --nodeid "$node" -v=5 -support-zone=false &
-else
-  _output/${ARCH}/azurediskpluginv2 --endpoint "$endpoint" --nodeid "$node" -v=5 --temp-use-driver-v2 -support-zone=false &
-fi
-trap cleanup EXIT
-
-if [[ "$cloud" == 'AzureChinaCloud' ]]; then
-  sleep 25
-else
-  sleep 5
-fi
-
-# begin to run CSI functions one by one
-"$CSC_BIN" node get-info --endpoint "$endpoint"
-
-echo 'Create volume test:'
-value=$("$CSC_BIN" controller new --endpoint "$endpoint" --cap 1,block "$volname" --req-bytes 2147483648 --params skuname=Standard_LRS,kind=managed)
-sleep 15
-
-volumeid=$(echo "$value" | awk '{print $1}' | sed 's/"//g')
-echo "Got volume id: $volumeid"
-
-"$CSC_BIN" controller validate-volume-capabilities --endpoint "$endpoint" --cap 1,block "$volumeid"
-
-echo 'Expand volume test'
-"$CSC_BIN" controller expand-volume --endpoint "$endpoint" --req-bytes 21474836480 --cap 1,block "$volumeid"
-
-echo 'Attach volume test:'
-"$CSC_BIN" controller publish --endpoint "$endpoint" --node-id "$node" --cap 1,block "$volumeid"
-sleep 20
-
-echo 'ListVolumes test:'
-"$CSC_BIN" controller list-volumes --endpoint "$endpoint" --max-entries 1 --starting-token 0
-
-echo 'Detach volume test:'
-"$CSC_BIN" controller unpublish --endpoint "$endpoint" --node-id "$node" "$volumeid"
-sleep 30
-
-echo 'Create snapshot test:'
-"$CSC_BIN" controller create-snapshot snapshot-test-name --endpoint "$endpoint" --source-volume "$volumeid"
-sleep 5
-
-echo 'List snapshots test:'
-"$CSC_BIN" controller list-snapshots --endpoint "$endpoint"
-sleep 5
-
-echo 'Delete snapshot test:'
-"$CSC_BIN" controller delete-snapshot snapshot-test-name --endpoint "$endpoint"
-sleep 5
-
-echo 'Delete volume test:'
-"$CSC_BIN" controller del --endpoint "$endpoint" "$volumeid"
-sleep 15
-
-"$CSC_BIN" identity plugin-info --endpoint "$endpoint"
-
-echo "Integration test on $cloud is completed."
diff --git a/test/integration/run-tests-all-clouds.sh b/test/integration/run-tests-all-clouds.sh
deleted file mode 100755
index 3ffa3f4c2..000000000
--- a/test/integration/run-tests-all-clouds.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -euo pipefail
-
-apt update && apt install cifs-utils procps -y
-GO111MODULE=off go get github.com/rexray/gocsi/csc
-
-cloud='AzurePublicCloud'
-if [[ "$#" -gt 0 ]]; then
-  cloud="$1"
-fi
-
-version='v1'
-if [[ "$#" -gt 1 ]]; then
-  version="$2"
-fi
-
-declare nodeid
-test/integration/run-test.sh 'tcp://127.0.0.1:10000' "$nodeid" "$cloud" "$version"
diff --git a/test/utils/azure/azure_helpers.go b/test/utils/azure/azure_helpers.go
index 65c07ffdf..a105a00d4 100644
--- a/test/utils/azure/azure_helpers.go
+++ b/test/utils/azure/azure_helpers.go
@@ -19,60 +19,69 @@ package azure

 import (
     "context"
     "fmt"
-    "log"
     "os"
-    "strings"
     "time"

-    "github.com/Azure/azure-sdk-for-go/sdk/azcore"
-    "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
-    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
-    "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
-    "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
-    "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources"
-    "github.com/Azure/go-autorest/autorest/azure"
-    "github.com/jongio/azidext/go/azidext"
-    "k8s.io/utils/pointer"
+    "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
+    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+    compute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+    network "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+    resources "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
+    "sigs.k8s.io/cloud-provider-azure/pkg/azclient"
+    "sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient"
+    "sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient"
+    "sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient"
+    "sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient"
+    "sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient"
+    "sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient"
+    "sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient"
 )

 type Client struct {
-    environment azure.Environment
-    subscriptionID string
-    groupsClient resources.GroupsClient
-    vmClient compute.VirtualMachinesClient
-    nicClient network.InterfacesClient
-    subnetsClient network.SubnetsClient
-    vnetClient network.VirtualNetworksClient
-    disksClient compute.DisksClient
-    sshPublicKeysClient compute.SSHPublicKeysClient
+    groupsClient resourcegroupclient.Interface
+    vmClient virtualmachineclient.Interface
+    nicClient interfaceclient.Interface
+    subnetsClient subnetclient.Interface
+    vnetClient virtualnetworkclient.Interface
+    disksClient diskclient.Interface
+    sshPublicKeysClient sshpublickeyresourceclient.Interface
 }

 func GetAzureClient(cloud, subscriptionID, clientID, tenantID, clientSecret string) (*Client, error) {
-    env, err := azure.EnvironmentFromName(cloud)
+    armConfig := &azclient.ARMClientConfig{
+        Cloud: cloud,
+        TenantID: tenantID,
+    }
+    credProvider, err := azclient.NewAuthProvider(armConfig, &azclient.AzureAuthConfig{
+        AADClientID: clientID,
+        AADClientSecret: clientSecret,
+    })
     if err != nil {
         return nil, err
     }
-
-    options := azidentity.ClientSecretCredentialOptions{
-        ClientOptions: azcore.ClientOptions{
-            Cloud: getCloudConfig(env),
-        },
-    }
-    cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, clientSecret, &options)
+    cred := credProvider.GetAzIdentity()
+    factory, err := azclient.NewClientFactory(&azclient.ClientFactoryConfig{
+        SubscriptionID: subscriptionID,
+    }, armConfig, cred)
     if err != nil {
         return nil, err
     }
-
-    return getClient(env, subscriptionID, tenantID, cred, env.TokenAudience), nil
+    return &Client{
+        groupsClient: factory.GetResourceGroupClient(),
+        vmClient: factory.GetVirtualMachineClient(),
+        nicClient: factory.GetInterfaceClient(),
+        subnetsClient: factory.GetSubnetClient(),
+        vnetClient: factory.GetVirtualNetworkClient(),
+        disksClient: factory.GetDiskClient(),
+        sshPublicKeysClient: factory.GetSSHPublicKeyResourceClient(),
+    }, nil
 }
-
-func (az *Client) GetAzureDisksClient() (compute.DisksClient, error) {
-
+func (az *Client) GetAzureDisksClient() (diskclient.Interface, error) {
     return az.disksClient, nil
 }

-func (az *Client) EnsureSSHPublicKey(ctx context.Context, subscriptionID, resourceGroupName, location, keyName string) (publicKey string, err error) {
-    _, err = az.sshPublicKeysClient.Create(ctx, resourceGroupName, keyName, compute.SSHPublicKeyResource{Location: &location})
+func (az *Client) EnsureSSHPublicKey(ctx context.Context, resourceGroupName, location, keyName string) (publicKey string, err error) {
+    _, err = az.sshPublicKeysClient.Create(ctx, resourceGroupName, keyName, armcompute.SSHPublicKeyResource{Location: &location})
     if err != nil {
         return "", err
     }
@@ -83,103 +92,97 @@ func (az *Client) EnsureSSHPublicKey(ctx context.Context, subscriptionID, resour
     return *result.PublicKey, nil
 }

-func (az *Client) EnsureResourceGroup(ctx context.Context, name, location string, managedBy *string) (resourceGroup *resources.Group, err error) {
+func (az *Client) EnsureResourceGroup(ctx context.Context, name, location string, managedBy *string) (resourceGroup *resources.ResourceGroup, err error) {
     var tags map[string]*string
     group, err := az.groupsClient.Get(ctx, name)
-    if err == nil && group.Tags != nil {
+    if err == nil && group != nil && group.Tags != nil {
         tags = group.Tags
     } else {
         tags = make(map[string]*string)
     }
-    if managedBy == nil {
+    if managedBy == nil && group != nil {
         managedBy = group.ManagedBy
     }
     // Tags for correlating resource groups with prow jobs on testgrid
-    tags["buildID"] = stringPointer(os.Getenv("BUILD_ID"))
-    tags["jobName"] = stringPointer(os.Getenv("JOB_NAME"))
-    tags["creationTimestamp"] = stringPointer(time.Now().UTC().Format(time.RFC3339))
+    tags["buildID"] = to.Ptr(os.Getenv("BUILD_ID"))
+    tags["jobName"] = to.Ptr(os.Getenv("JOB_NAME"))
+    tags["creationTimestamp"] = to.Ptr(time.Now().UTC().Format(time.RFC3339))

-    response, err := az.groupsClient.CreateOrUpdate(ctx, name, resources.Group{
+    response, err := az.groupsClient.CreateOrUpdate(ctx, name, resources.ResourceGroup{
         Name: &name,
         Location: &location,
         ManagedBy: managedBy,
         Tags: tags,
     })
     if err != nil {
-        return &response, err
+        return response, err
     }

-    return &response, nil
+    return response, nil
 }

 func (az *Client) DeleteResourceGroup(ctx context.Context, groupName string) error {
     _, err := az.groupsClient.Get(ctx, groupName)
     if err == nil {
-        future, err := az.groupsClient.Delete(ctx, groupName)
+        err = az.groupsClient.Delete(ctx, groupName)
         if err != nil {
             return fmt.Errorf("cannot delete resource group %v: %v", groupName, err)
         }
-        err = future.WaitForCompletionRef(ctx, az.groupsClient.Client)
-        if err != nil {
-            // Skip the teardown errors because of https://github.com/Azure/go-autorest/issues/357
-            // TODO(feiskyer): fix the issue by upgrading go-autorest version >= v11.3.2.
-            log.Printf("Warning: failed to delete resource group %q with error %v", groupName, err)
-        }
     }
     return nil
 }

-func (az *Client) EnsureVirtualMachine(ctx context.Context, groupName, location, vmName string) (vm compute.VirtualMachine, err error) {
+func (az *Client) EnsureVirtualMachine(ctx context.Context, groupName, location, vmName string) (compute.VirtualMachine, error) {
     nic, err := az.EnsureNIC(ctx, groupName, location, vmName+"-nic", vmName+"-vnet", vmName+"-subnet")
     if err != nil {
-        return vm, err
+        return armcompute.VirtualMachine{}, err
     }

-    publicKey, err := az.EnsureSSHPublicKey(ctx, az.subscriptionID, groupName, location, "test-key")
+    publicKey, err := az.EnsureSSHPublicKey(ctx, groupName, location, "test-key")
     if err != nil {
-        return vm, err
+        return armcompute.VirtualMachine{}, err
     }

-    future, err := az.vmClient.CreateOrUpdate(
+    resp, err := az.vmClient.CreateOrUpdate(
         ctx,
         groupName,
         vmName,
         compute.VirtualMachine{
-            Location: pointer.String(location),
-            VirtualMachineProperties: &compute.VirtualMachineProperties{
-                HardwareProfile: &compute.HardwareProfile{
-                    VMSize: "Standard_DS2_v2",
+            Location: to.Ptr(location),
+            Properties: &armcompute.VirtualMachineProperties{
+                HardwareProfile: &armcompute.HardwareProfile{
+                    VMSize: to.Ptr(compute.VirtualMachineSizeTypesStandardDS2V2),
                 },
-                StorageProfile: &compute.StorageProfile{
-                    ImageReference: &compute.ImageReference{
-                        Publisher: pointer.String("Canonical"),
-                        Offer: pointer.String("UbuntuServer"),
-                        Sku: pointer.String("16.04.0-LTS"),
-                        Version: pointer.String("latest"),
+                StorageProfile: &armcompute.StorageProfile{
+                    ImageReference: &armcompute.ImageReference{
+                        Publisher: to.Ptr("Canonical"),
+                        Offer: to.Ptr("UbuntuServer"),
+                        SKU: to.Ptr("16.04.0-LTS"),
+                        Version: to.Ptr("latest"),
                     },
                 },
-                OsProfile: &compute.OSProfile{
-                    ComputerName: pointer.String(vmName),
-                    AdminUsername: pointer.String("azureuser"),
-                    AdminPassword: pointer.String("Azureuser1234"),
-                    LinuxConfiguration: &compute.LinuxConfiguration{
-                        DisablePasswordAuthentication: pointer.Bool(true),
-                        SSH: &compute.SSHConfiguration{
-                            PublicKeys: &[]compute.SSHPublicKey{
+                OSProfile: &armcompute.OSProfile{
+                    ComputerName: to.Ptr(vmName),
+                    AdminUsername: to.Ptr("azureuser"),
+                    AdminPassword: to.Ptr("Azureuser1234"),
+                    LinuxConfiguration: &armcompute.LinuxConfiguration{
+                        DisablePasswordAuthentication: to.Ptr(true),
+                        SSH: &armcompute.SSHConfiguration{
+                            PublicKeys: []*compute.SSHPublicKey{
                                 {
-                                    Path: pointer.String("/home/azureuser/.ssh/authorized_keys"),
+                                    Path: to.Ptr("/home/azureuser/.ssh/authorized_keys"),
                                     KeyData: &publicKey,
                                 },
                             },
                         },
                     },
                 },
-                NetworkProfile: &compute.NetworkProfile{
-                    NetworkInterfaces: &[]compute.NetworkInterfaceReference{
+                NetworkProfile: &armcompute.NetworkProfile{
+                    NetworkInterfaces: []*compute.NetworkInterfaceReference{
                         {
                             ID: nic.ID,
-                            NetworkInterfaceReferenceProperties: &compute.NetworkInterfaceReferenceProperties{
-                                Primary: pointer.Bool(true),
+                            Properties: &armcompute.NetworkInterfaceReferenceProperties{
+                                Primary: to.Ptr(true),
                             },
                         },
                     },
@@ -188,42 +191,37 @@ func (az *Client) EnsureVirtualMachine(ctx context.Context, groupName, location,
         },
     )
     if err != nil {
-        return vm, fmt.Errorf("cannot create vm: %v", err)
-    }
-
-    err = future.WaitForCompletionRef(ctx, az.vmClient.Client)
-    if err != nil {
-        return vm, fmt.Errorf("cannot get the vm create or update future response: %v", err)
+        return armcompute.VirtualMachine{}, fmt.Errorf("cannot create vm: %v", err)
     }

-    return future.Result(az.vmClient)
+    return *resp, nil
 }

-func (az *Client) EnsureNIC(ctx context.Context, groupName, location, nicName, vnetName, subnetName string) (nic network.Interface, err error) {
-    _, err = az.EnsureVirtualNetworkAndSubnet(ctx, groupName, location, vnetName, subnetName)
+func (az *Client) EnsureNIC(ctx context.Context, groupName, location, nicName, vnetName, subnetName string) (network.Interface, error) {
+    _, err := az.EnsureVirtualNetworkAndSubnet(ctx, groupName, location, vnetName, subnetName)
     if err != nil {
-        return nic, err
+        return network.Interface{}, err
     }

     subnet, err := az.GetVirtualNetworkSubnet(ctx, groupName, vnetName, subnetName)
     if err != nil {
-        return nic, fmt.Errorf("cannot get subnet %s of virtual network %s in %s: %v", subnetName, vnetName, groupName, err)
+        return network.Interface{}, fmt.Errorf("cannot get subnet %s of virtual network %s in %s: %v", subnetName, vnetName, groupName, err)
     }

-    future, err := az.nicClient.CreateOrUpdate(
+    nic, err := az.nicClient.CreateOrUpdate(
         ctx,
         groupName,
         nicName,
         network.Interface{
-            Name: pointer.String(nicName),
-            Location: pointer.String(location),
-            InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
-                IPConfigurations: &[]network.InterfaceIPConfiguration{
+            Name: to.Ptr(nicName),
+            Location: to.Ptr(location),
+            Properties: &network.InterfacePropertiesFormat{
+                IPConfigurations: []*network.InterfaceIPConfiguration{
                     {
-                        Name: pointer.String("ipConfig1"),
-                        InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
+                        Name: to.Ptr("ipConfig1"),
+                        Properties: &network.InterfaceIPConfigurationPropertiesFormat{
                             Subnet: &subnet,
-                            PrivateIPAllocationMethod: network.Dynamic,
+                            PrivateIPAllocationMethod: to.Ptr(network.IPAllocationMethodDynamic),
                         },
                     },
                 },
@@ -231,33 +229,28 @@ func (az *Client) EnsureNIC(ctx context.Context, groupName, location, nicName, v
         },
     )
     if err != nil {
-        return nic, fmt.Errorf("cannot create nic: %v", err)
+        return network.Interface{}, fmt.Errorf("cannot create nic: %v", err)
     }

-    err = future.WaitForCompletionRef(ctx, az.nicClient.Client)
-    if err != nil {
-        return nic, fmt.Errorf("cannot get nic create or update future response: %v", err)
-    }
-
-    return future.Result(az.nicClient)
+    return *nic, nil
 }

-func (az *Client) EnsureVirtualNetworkAndSubnet(ctx context.Context, groupName, location, vnetName, subnetName string) (vnet network.VirtualNetwork, err error) {
-    future, err := az.vnetClient.CreateOrUpdate(
+func (az *Client) EnsureVirtualNetworkAndSubnet(ctx context.Context, groupName, location, vnetName, subnetName string) (network.VirtualNetwork, error) {
+    vnet, err := az.vnetClient.CreateOrUpdate(
         ctx,
         groupName,
         vnetName,
         network.VirtualNetwork{
-            Location: pointer.String(location),
-            VirtualNetworkPropertiesFormat: &network.VirtualNetworkPropertiesFormat{
+            Location: to.Ptr(location),
+            Properties: &network.VirtualNetworkPropertiesFormat{
                 AddressSpace: &network.AddressSpace{
-                    AddressPrefixes: &[]string{"10.0.0.0/8"},
+                    AddressPrefixes: []*string{to.Ptr("10.0.0.0/8")},
                 },
-                Subnets: &[]network.Subnet{
+                Subnets: []*network.Subnet{
                     {
-                        Name: pointer.String(subnetName),
-                        SubnetPropertiesFormat: &network.SubnetPropertiesFormat{
-                            AddressPrefix: pointer.String("10.0.0.0/16"),
+                        Name: to.Ptr(subnetName),
+                        Properties: &network.SubnetPropertiesFormat{
+                            AddressPrefix: to.Ptr("10.0.0.0/16"),
                         },
                     },
                 },
@@ -265,76 +258,13 @@ func (az *Client) EnsureVirtualNetworkAndSubnet(ctx context.Context, groupName,
         })

     if err != nil {
-        return vnet, fmt.Errorf("cannot create virtual network: %v", err)
-    }
-
-    err = future.WaitForCompletionRef(ctx, az.vnetClient.Client)
-    if err != nil {
-        return vnet, fmt.Errorf("cannot get the vnet create or update future response: %v", err)
+        return network.VirtualNetwork{}, fmt.Errorf("cannot create virtual network: %v", err)
     }

-    return future.Result(az.vnetClient)
+    return *vnet, nil
 }

 func (az *Client) GetVirtualNetworkSubnet(ctx context.Context, groupName, vnetName, subnetName string) (network.Subnet, error) {
-    return az.subnetsClient.Get(ctx, groupName, vnetName, subnetName, "")
-}
-
-func getCloudConfig(env azure.Environment) cloud.Configuration {
-    switch env.Name {
-    case azure.USGovernmentCloud.Name:
-        return cloud.AzureGovernment
-    case azure.ChinaCloud.Name:
-        return cloud.AzureChina
-    case azure.PublicCloud.Name:
-        return cloud.AzurePublic
-    default:
-        return cloud.Configuration{
-            ActiveDirectoryAuthorityHost: env.ActiveDirectoryEndpoint,
-            Services: map[cloud.ServiceName]cloud.ServiceConfiguration{
-                cloud.ResourceManager: {
-                    Audience: env.TokenAudience,
-                    Endpoint: env.ResourceManagerEndpoint,
-                },
-            },
-        }
-    }
-}
-
-func getClient(env azure.Environment, subscriptionID, tenantID string, cred *azidentity.ClientSecretCredential, scope string) *Client {
-    c := &Client{
-        environment: env,
-        subscriptionID: subscriptionID,
-        groupsClient: resources.NewGroupsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
-        vmClient: compute.NewVirtualMachinesClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
-        nicClient: network.NewInterfacesClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
-        subnetsClient: network.NewSubnetsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
-        vnetClient: network.NewVirtualNetworksClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
-        disksClient: compute.NewDisksClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
-        sshPublicKeysClient: compute.NewSSHPublicKeysClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
-    }
-
-    if !strings.HasSuffix(scope, "/.default") {
-        scope += "/.default"
-    }
-    // Use an adapter so azidentity in the Azure SDK can be used as Authorizer
-    // when calling the Azure Management Packages, which we currently use. Once
-    // the Azure SDK clients (found in /sdk) move to stable, we can update our
-    // clients and they will be able to use the creds directly without the
-    // authorizer.
-    authorizer := azidext.NewTokenCredentialAdapter(cred, []string{scope})
-
-    c.groupsClient.Authorizer = authorizer
-    c.vmClient.Authorizer = authorizer
-    c.nicClient.Authorizer = authorizer
-    c.subnetsClient.Authorizer = authorizer
-    c.vnetClient.Authorizer = authorizer
-    c.disksClient.Authorizer = authorizer
-    c.sshPublicKeysClient.Authorizer = authorizer
-
-    return c
-}
-
-func stringPointer(s string) *string {
-    return &s
+    subnet, err := az.subnetsClient.Get(ctx, groupName, vnetName, subnetName, nil)
+    return *subnet, err
 }
diff --git a/test/utils/credentials/credentials.go b/test/utils/credentials/credentials.go
index 30b566da9..75e3bd05c 100644
--- a/test/utils/credentials/credentials.go
+++ b/test/utils/credentials/credentials.go
@@ -24,8 +24,8 @@ import (

     "sigs.k8s.io/azuredisk-csi-driver/test/utils/testutil"

-    "github.com/pborman/uuid"
     "github.com/pelletier/go-toml"
+    "k8s.io/apimachinery/pkg/util/uuid"
 )

 const (
@@ -104,7 +104,7 @@ func CreateAzureCredentialFile() (*Credentials, error) {
     vmType = os.Getenv(vmTypeEnvVar)

     if resourceGroup == "" {
-        resourceGroup = ResourceGroupPrefix + uuid.NewUUID().String()
+        resourceGroup = ResourceGroupPrefix + string(uuid.NewUUID())
     }

     if location == "" {