From 11cc0c09950548f97f15caccd6e67d681d210802 Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Tue, 9 Jul 2024 10:06:03 +0200 Subject: [PATCH 01/39] Helm operator --- .gitignore | 14 ++ Dockerfile | 7 + Makefile | 227 ++++++++++++++++++ PROJECT | 20 ++ ....my.domain_memgraphhighavailabilities.yaml | 44 ++++ config/crd/kustomization.yaml | 6 + config/default/kustomization.yaml | 28 +++ config/default/manager_auth_proxy_patch.yaml | 40 +++ config/default/manager_config_patch.yaml | 10 + config/manager/kustomization.yaml | 2 + config/manager/manager.yaml | 101 ++++++++ config/manifests/kustomization.yaml | 7 + config/prometheus/kustomization.yaml | 2 + config/prometheus/monitor.yaml | 25 ++ .../rbac/auth_proxy_client_clusterrole.yaml | 16 ++ config/rbac/auth_proxy_role.yaml | 24 ++ config/rbac/auth_proxy_role_binding.yaml | 19 ++ config/rbac/auth_proxy_service.yaml | 21 ++ config/rbac/kustomization.yaml | 18 ++ config/rbac/leader_election_role.yaml | 44 ++++ config/rbac/leader_election_role_binding.yaml | 19 ++ .../memgraphhighavailability_editor_role.yaml | 31 +++ .../memgraphhighavailability_viewer_role.yaml | 27 +++ config/rbac/role.yaml | 67 ++++++ config/rbac/role_binding.yaml | 19 ++ config/rbac/service_account.yaml | 12 + ...rts_v1alpha1_memgraphhighavailability.yaml | 99 ++++++++ config/samples/kustomization.yaml | 4 + config/scorecard/bases/config.yaml | 7 + config/scorecard/kustomization.yaml | 16 ++ config/scorecard/patches/basic.config.yaml | 10 + config/scorecard/patches/olm.config.yaml | 50 ++++ .../memgraph-high-availability/.helmignore | 23 ++ .../memgraph-high-availability/Chart.yaml | 21 ++ .../memgraph-high-availability/README.md | 72 ++++++ .../templates/NOTES.txt | 17 ++ .../templates/_helpers.tpl | 44 ++++ .../templates/cluster-setup.yaml | 51 ++++ .../templates/coordinators.yaml | 116 +++++++++ .../templates/data.yaml | 118 +++++++++ .../templates/services-coordinators.yaml | 39 +++ .../templates/services-data.yaml | 43 ++++ 
.../templates/tests/test-cluster-setup.yaml | 53 ++++ .../templates/tests/test-connection.yaml | 29 +++ .../memgraph-high-availability/values.yaml | 100 ++++++++ watches.yaml | 6 + 46 files changed, 1768 insertions(+) create mode 100644 .gitignore create mode 100644 Dockerfile create mode 100644 Makefile create mode 100644 PROJECT create mode 100644 config/crd/bases/charts.my.domain_memgraphhighavailabilities.yaml create mode 100644 config/crd/kustomization.yaml create mode 100644 config/default/kustomization.yaml create mode 100644 config/default/manager_auth_proxy_patch.yaml create mode 100644 config/default/manager_config_patch.yaml create mode 100644 config/manager/kustomization.yaml create mode 100644 config/manager/manager.yaml create mode 100644 config/manifests/kustomization.yaml create mode 100644 config/prometheus/kustomization.yaml create mode 100644 config/prometheus/monitor.yaml create mode 100644 config/rbac/auth_proxy_client_clusterrole.yaml create mode 100644 config/rbac/auth_proxy_role.yaml create mode 100644 config/rbac/auth_proxy_role_binding.yaml create mode 100644 config/rbac/auth_proxy_service.yaml create mode 100644 config/rbac/kustomization.yaml create mode 100644 config/rbac/leader_election_role.yaml create mode 100644 config/rbac/leader_election_role_binding.yaml create mode 100644 config/rbac/memgraphhighavailability_editor_role.yaml create mode 100644 config/rbac/memgraphhighavailability_viewer_role.yaml create mode 100644 config/rbac/role.yaml create mode 100644 config/rbac/role_binding.yaml create mode 100644 config/rbac/service_account.yaml create mode 100644 config/samples/charts_v1alpha1_memgraphhighavailability.yaml create mode 100644 config/samples/kustomization.yaml create mode 100644 config/scorecard/bases/config.yaml create mode 100644 config/scorecard/kustomization.yaml create mode 100644 config/scorecard/patches/basic.config.yaml create mode 100644 config/scorecard/patches/olm.config.yaml create mode 100644 
helm-charts/memgraph-high-availability/.helmignore create mode 100644 helm-charts/memgraph-high-availability/Chart.yaml create mode 100644 helm-charts/memgraph-high-availability/README.md create mode 100644 helm-charts/memgraph-high-availability/templates/NOTES.txt create mode 100644 helm-charts/memgraph-high-availability/templates/_helpers.tpl create mode 100644 helm-charts/memgraph-high-availability/templates/cluster-setup.yaml create mode 100644 helm-charts/memgraph-high-availability/templates/coordinators.yaml create mode 100644 helm-charts/memgraph-high-availability/templates/data.yaml create mode 100644 helm-charts/memgraph-high-availability/templates/services-coordinators.yaml create mode 100644 helm-charts/memgraph-high-availability/templates/services-data.yaml create mode 100644 helm-charts/memgraph-high-availability/templates/tests/test-cluster-setup.yaml create mode 100644 helm-charts/memgraph-high-availability/templates/tests/test-connection.yaml create mode 100644 helm-charts/memgraph-high-availability/values.yaml create mode 100644 watches.yaml diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..62fd3e3 --- /dev/null +++ b/.gitignore @@ -0,0 +1,14 @@ + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..509b516 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,7 @@ +# Build the manager binary +FROM quay.io/operator-framework/helm-operator:v1.35.0 + +ENV HOME=/opt/helm +COPY watches.yaml ${HOME}/watches.yaml +COPY helm-charts ${HOME}/helm-charts +WORKDIR ${HOME} diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..dc86b78 --- /dev/null +++ b/Makefile @@ -0,0 +1,227 @@ +# VERSION defines the project version for the bundle. +# Update this value when you upgrade the version of your project. 
+# To re-generate a bundle for another specific version without changing the standard setup, you can: +# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) +# - use environment variables to overwrite this value (e.g export VERSION=0.0.2) +VERSION ?= 0.0.1 + +# CHANNELS define the bundle channels used in the bundle. +# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable") +# To re-generate a bundle for other specific channels without changing the standard setup, you can: +# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable) +# - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable") +ifneq ($(origin CHANNELS), undefined) +BUNDLE_CHANNELS := --channels=$(CHANNELS) +endif + +# DEFAULT_CHANNEL defines the default channel used in the bundle. +# Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable") +# To re-generate a bundle for any other default channel without changing the default setup, you can: +# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable) +# - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable") +ifneq ($(origin DEFAULT_CHANNEL), undefined) +BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL) +endif +BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) + +# IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images. +# This variable is used to construct full image tags for bundle and catalog images. +# +# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both +# my.domain/kubernetes-operator-bundle:$VERSION and my.domain/kubernetes-operator-catalog:$VERSION. 
+IMAGE_TAG_BASE ?= my.domain/kubernetes-operator + +# BUNDLE_IMG defines the image:tag used for the bundle. +# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=/:) +BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION) + +# BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command +BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) + +# USE_IMAGE_DIGESTS defines if images are resolved via tags or digests +# You can enable this value if you would like to use SHA Based Digests +# To enable set flag to true +USE_IMAGE_DIGESTS ?= false +ifeq ($(USE_IMAGE_DIGESTS), true) + BUNDLE_GEN_FLAGS += --use-image-digests +endif + +# Set the Operator SDK version to use. By default, what is installed on the system is used. +# This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. +OPERATOR_SDK_VERSION ?= v1.35.0 + +# Image URL to use all building/pushing image targets +IMG ?= controller:latest + +.PHONY: all +all: docker-build + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk commands is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. 
+ @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Build + +.PHONY: run +run: helm-operator ## Run against the configured Kubernetes cluster in ~/.kube/config + $(HELM_OPERATOR) run + +.PHONY: docker-build +docker-build: ## Build docker image with the manager. + docker build -t ${IMG} . + +.PHONY: docker-push +docker-push: ## Push docker image with the manager. + docker push ${IMG} + +# PLATFORMS defines the target platforms for the manager image be build to provide support to multiple +# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: +# - able to use docker buildx . More info: https://docs.docker.com/build/buildx/ +# - have enable BuildKit, More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image for your registry (i.e. if you do not inform a valid value via IMG=> than the export will fail) +# To properly provided solutions that supports more than one platform you should use this option. +PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +.PHONY: docker-buildx +docker-buildx: ## Build and push docker image for the manager for cross-platform support + - docker buildx create --name project-v3-builder + docker buildx use project-v3-builder + - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile . + - docker buildx rm project-v3-builder + +##@ Deployment + +.PHONY: install +install: kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/crd | kubectl apply -f - + +.PHONY: uninstall +uninstall: kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. 
+ $(KUSTOMIZE) build config/crd | kubectl delete -f - + +.PHONY: deploy +deploy: kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default | kubectl apply -f - + +.PHONY: undeploy +undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/default | kubectl delete -f - + +OS := $(shell uname -s | tr '[:upper:]' '[:lower:]') +ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/') + +.PHONY: kustomize +KUSTOMIZE = $(shell pwd)/bin/kustomize +kustomize: ## Download kustomize locally if necessary. +ifeq (,$(wildcard $(KUSTOMIZE))) +ifeq (,$(shell which kustomize 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(KUSTOMIZE)) ;\ + curl -sSLo - https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v5.2.1/kustomize_v5.2.1_$(OS)_$(ARCH).tar.gz | \ + tar xzf - -C bin/ ;\ + } +else +KUSTOMIZE = $(shell which kustomize) +endif +endif + +.PHONY: helm-operator +HELM_OPERATOR = $(shell pwd)/bin/helm-operator +helm-operator: ## Download helm-operator locally if necessary, preferring the $(pwd)/bin path over global if both exist. +ifeq (,$(wildcard $(HELM_OPERATOR))) +ifeq (,$(shell which helm-operator 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(HELM_OPERATOR)) ;\ + curl -sSLo $(HELM_OPERATOR) https://github.com/operator-framework/operator-sdk/releases/download/v1.35.0/helm-operator_$(OS)_$(ARCH) ;\ + chmod +x $(HELM_OPERATOR) ;\ + } +else +HELM_OPERATOR = $(shell which helm-operator) +endif +endif + +.PHONY: operator-sdk +OPERATOR_SDK ?= $(LOCALBIN)/operator-sdk +operator-sdk: ## Download operator-sdk locally if necessary. 
+ifeq (,$(wildcard $(OPERATOR_SDK))) +ifeq (, $(shell which operator-sdk 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(OPERATOR_SDK)) ;\ + curl -sSLo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR_SDK_VERSION)/operator-sdk_$(OS)_$(ARCH) ;\ + chmod +x $(OPERATOR_SDK) ;\ + } +else +OPERATOR_SDK = $(shell which operator-sdk) +endif +endif + +.PHONY: bundle +bundle: kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files. + $(OPERATOR_SDK) generate kustomize manifests -q + cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) + $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS) + $(OPERATOR_SDK) bundle validate ./bundle + +.PHONY: bundle-build +bundle-build: ## Build the bundle image. + docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) . + +.PHONY: bundle-push +bundle-push: ## Push the bundle image. + $(MAKE) docker-push IMG=$(BUNDLE_IMG) + +.PHONY: opm +OPM = $(LOCALBIN)/opm +opm: ## Download opm locally if necessary. +ifeq (,$(wildcard $(OPM))) +ifeq (,$(shell which opm 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(OPM)) ;\ + curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.23.0/$(OS)-$(ARCH)-opm ;\ + chmod +x $(OPM) ;\ + } +else +OPM = $(shell which opm) +endif +endif + +# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0). +# These images MUST exist in a registry and be pull-able. +BUNDLE_IMGS ?= $(BUNDLE_IMG) + +# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0). +CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION) + +# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image. 
+ifneq ($(origin CATALOG_BASE_IMG), undefined) +FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG) +endif + +# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'. +# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see: +# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator +.PHONY: catalog-build +catalog-build: opm ## Build a catalog image. + $(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) + +# Push the catalog image. +.PHONY: catalog-push +catalog-push: ## Push a catalog image. + $(MAKE) docker-push IMG=$(CATALOG_IMG) diff --git a/PROJECT b/PROJECT new file mode 100644 index 0000000..3c5cdeb --- /dev/null +++ b/PROJECT @@ -0,0 +1,20 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. 
+# More info: https://book.kubebuilder.io/reference/project-config.html +domain: my.domain +layout: +- helm.sdk.operatorframework.io/v1 +plugins: + manifests.sdk.operatorframework.io/v2: {} + scorecard.sdk.operatorframework.io/v2: {} +projectName: kubernetes-operator +resources: +- api: + crdVersion: v1 + namespaced: true + domain: my.domain + group: charts + kind: MemgraphHighAvailability + version: v1alpha1 +version: "3" diff --git a/config/crd/bases/charts.my.domain_memgraphhighavailabilities.yaml b/config/crd/bases/charts.my.domain_memgraphhighavailabilities.yaml new file mode 100644 index 0000000..3973c7d --- /dev/null +++ b/config/crd/bases/charts.my.domain_memgraphhighavailabilities.yaml @@ -0,0 +1,44 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: memgraphhighavailabilities.charts.my.domain +spec: + group: charts.my.domain + names: + kind: MemgraphHighAvailability + listKind: MemgraphHighAvailabilityList + plural: memgraphhighavailabilities + singular: memgraphhighavailability + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: MemgraphHighAvailability is the Schema for the memgraphhighavailabilities API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired state of MemgraphHighAvailability + type: object + x-kubernetes-preserve-unknown-fields: true + status: + description: Status defines the observed state of MemgraphHighAvailability + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml new file mode 100644 index 0000000..bb22f5e --- /dev/null +++ b/config/crd/kustomization.yaml @@ -0,0 +1,6 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/charts.my.domain_memgraphhighavailabilities.yaml +#+kubebuilder:scaffold:crdkustomizeresource diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml new file mode 100644 index 0000000..e96fd6a --- /dev/null +++ b/config/default/kustomization.yaml @@ -0,0 +1,28 @@ +# Adds namespace to all resources. +namespace: kubernetes-operator-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: kubernetes-operator- + +# Labels to add to all resources and selectors. +#labels: +#- includeSelectors: true +# pairs: +# someName: someValue + +resources: +- ../crd +- ../rbac +- ../manager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus + +patches: +# Protect the /metrics endpoint by putting it behind auth. 
+# If you want your controller-manager to expose the /metrics +# endpoint w/o any authn/z, please comment the following line. +- path: manager_auth_proxy_patch.yaml diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml new file mode 100644 index 0000000..e252f1b --- /dev/null +++ b/config/default/manager_auth_proxy_patch.yaml @@ -0,0 +1,40 @@ +# This patch inject a sidecar container which is a HTTP proxy for the +# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: kube-rbac-proxy + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.15.0 + args: + - "--secure-listen-address=0.0.0.0:8443" + - "--upstream=http://127.0.0.1:8080/" + - "--logtostderr=true" + - "--v=0" + ports: + - containerPort: 8443 + protocol: TCP + name: https + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi + - name: manager + args: + - "--health-probe-bind-address=:8081" + - "--metrics-bind-address=127.0.0.1:8080" + - "--leader-elect" + - "--leader-election-id=kubernetes-operator" diff --git a/config/default/manager_config_patch.yaml b/config/default/manager_config_patch.yaml new file mode 100644 index 0000000..f6f5891 --- /dev/null +++ b/config/default/manager_config_patch.yaml @@ -0,0 +1,10 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml new file mode 100644 index 0000000..5c5f0b8 --- /dev/null +++ b/config/manager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- manager.yaml diff --git a/config/manager/manager.yaml 
b/config/manager/manager.yaml new file mode 100644 index 0000000..038b383 --- /dev/null +++ b/config/manager/manager.yaml @@ -0,0 +1,101 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: namespace + app.kubernetes.io/instance: system + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: kubernetes-operator + app.kubernetes.io/part-of: kubernetes-operator + app.kubernetes.io/managed-by: kustomize + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager + app.kubernetes.io/name: deployment + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: kubernetes-operator + app.kubernetes.io/part-of: kubernetes-operator + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux + securityContext: + runAsNonRoot: true + # TODO(user): For common cases that do not require escalating privileges + # it is recommended to ensure that all your Pods/Containers are restrictive. 
+ # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + # Please uncomment the following code if your project does NOT have to work on old Kubernetes + # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ). + # seccompProfile: + # type: RuntimeDefault + containers: + - args: + - --leader-elect + - --leader-election-id=kubernetes-operator + image: controller:latest + name: manager + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + # TODO(user): Configure the resources accordingly based on the project requirements. + # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + serviceAccountName: controller-manager + terminationGracePeriodSeconds: 10 diff --git a/config/manifests/kustomization.yaml b/config/manifests/kustomization.yaml new file mode 100644 index 0000000..e8b968a --- /dev/null +++ b/config/manifests/kustomization.yaml @@ -0,0 +1,7 @@ +# These resources constitute the fully configured set of manifests +# used to generate the 'manifests/' directory in a bundle. 
+resources: +- bases/kubernetes-operator.clusterserviceversion.yaml +- ../default +- ../samples +- ../scorecard diff --git a/config/prometheus/kustomization.yaml b/config/prometheus/kustomization.yaml new file mode 100644 index 0000000..ed13716 --- /dev/null +++ b/config/prometheus/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- monitor.yaml diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml new file mode 100644 index 0000000..7d5f441 --- /dev/null +++ b/config/prometheus/monitor.yaml @@ -0,0 +1,25 @@ +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: servicemonitor + app.kubernetes.io/instance: controller-manager-metrics-monitor + app.kubernetes.io/component: metrics + app.kubernetes.io/created-by: kubernetes-operator + app.kubernetes.io/part-of: kubernetes-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + insecureSkipVerify: true + selector: + matchLabels: + control-plane: controller-manager diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/auth_proxy_client_clusterrole.yaml new file mode 100644 index 0000000..6bfe0a9 --- /dev/null +++ b/config/rbac/auth_proxy_client_clusterrole.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: metrics-reader + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: kubernetes-operator + app.kubernetes.io/part-of: kubernetes-operator + app.kubernetes.io/managed-by: kustomize + name: metrics-reader +rules: +- nonResourceURLs: + - "/metrics" + verbs: + - get diff --git 
a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml new file mode 100644 index 0000000..2ff84ee --- /dev/null +++ b/config/rbac/auth_proxy_role.yaml @@ -0,0 +1,24 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: proxy-role + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: kubernetes-operator + app.kubernetes.io/part-of: kubernetes-operator + app.kubernetes.io/managed-by: kustomize + name: proxy-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml new file mode 100644 index 0000000..b7f3ab0 --- /dev/null +++ b/config/rbac/auth_proxy_role_binding.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/instance: proxy-rolebinding + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: kubernetes-operator + app.kubernetes.io/part-of: kubernetes-operator + app.kubernetes.io/managed-by: kustomize + name: proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: proxy-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml new file mode 100644 index 0000000..cdd7723 --- /dev/null +++ b/config/rbac/auth_proxy_service.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: service + app.kubernetes.io/instance: controller-manager-metrics-service + app.kubernetes.io/component: kube-rbac-proxy + 
app.kubernetes.io/created-by: kubernetes-operator + app.kubernetes.io/part-of: kubernetes-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https + selector: + control-plane: controller-manager diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml new file mode 100644 index 0000000..731832a --- /dev/null +++ b/config/rbac/kustomization.yaml @@ -0,0 +1,18 @@ +resources: +# All RBAC will be applied under this service account in +# the deployment namespace. You may comment out this resource +# if your manager will use a service account that exists at +# runtime. Be sure to update RoleBinding and ClusterRoleBinding +# subjects if changing service account names. +- service_account.yaml +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# Comment the following 4 lines if you want to disable +# the auth proxy (https://github.com/brancz/kube-rbac-proxy) +# which protects your /metrics endpoint. +- auth_proxy_service.yaml +- auth_proxy_role.yaml +- auth_proxy_role_binding.yaml +- auth_proxy_client_clusterrole.yaml diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml new file mode 100644 index 0000000..ddf76ba --- /dev/null +++ b/config/rbac/leader_election_role.yaml @@ -0,0 +1,44 @@ +# permissions to do leader election. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: role + app.kubernetes.io/instance: leader-election-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubernetes-operator + app.kubernetes.io/part-of: kubernetes-operator + app.kubernetes.io/managed-by: kustomize + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 0000000..55647f2 --- /dev/null +++ b/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: rolebinding + app.kubernetes.io/instance: leader-election-rolebinding + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubernetes-operator + app.kubernetes.io/part-of: kubernetes-operator + app.kubernetes.io/managed-by: kustomize + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/memgraphhighavailability_editor_role.yaml b/config/rbac/memgraphhighavailability_editor_role.yaml new file mode 100644 index 0000000..06dd3a6 --- /dev/null +++ b/config/rbac/memgraphhighavailability_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit memgraphhighavailabilities. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: memgraphhighavailability-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubernetes-operator + app.kubernetes.io/part-of: kubernetes-operator + app.kubernetes.io/managed-by: kustomize + name: memgraphhighavailability-editor-role +rules: +- apiGroups: + - charts.my.domain + resources: + - memgraphhighavailabilities + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - charts.my.domain + resources: + - memgraphhighavailabilities/status + verbs: + - get diff --git a/config/rbac/memgraphhighavailability_viewer_role.yaml b/config/rbac/memgraphhighavailability_viewer_role.yaml new file mode 100644 index 0000000..e8f8a07 --- /dev/null +++ b/config/rbac/memgraphhighavailability_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view memgraphhighavailabilities. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: memgraphhighavailability-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubernetes-operator + app.kubernetes.io/part-of: kubernetes-operator + app.kubernetes.io/managed-by: kustomize + name: memgraphhighavailability-viewer-role +rules: +- apiGroups: + - charts.my.domain + resources: + - memgraphhighavailabilities + verbs: + - get + - list + - watch +- apiGroups: + - charts.my.domain + resources: + - memgraphhighavailabilities/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml new file mode 100644 index 0000000..f94366c --- /dev/null +++ b/config/rbac/role.yaml @@ -0,0 +1,67 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: manager-role +rules: +## +## Base operator rules +## +# We need to get namespaces so the operator can read namespaces to ensure they exist +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +# We need to manage Helm release secrets +- apiGroups: + - "" + resources: + - secrets + verbs: + - "*" +# We need to create events on CRs about things happening during reconciliation +- apiGroups: + - "" + resources: + - events + verbs: + - create + +## +## Rules for charts.my.domain/v1alpha1, Kind: MemgraphHighAvailability +## +- apiGroups: + - charts.my.domain + resources: + - memgraphhighavailabilities + - memgraphhighavailabilities/status + - memgraphhighavailabilities/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- verbs: + - "*" + apiGroups: + - "batch" + resources: + - "jobs" +- verbs: + - "*" + apiGroups: + - "" + resources: + - "services" +- verbs: + - "*" + apiGroups: + - "apps" + resources: + - "statefulsets" + +#+kubebuilder:scaffold:rules diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml new file mode 100644 index 
0000000..9eecc5e --- /dev/null +++ b/config/rbac/role_binding.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/instance: manager-rolebinding + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubernetes-operator + app.kubernetes.io/part-of: kubernetes-operator + app.kubernetes.io/managed-by: kustomize + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/service_account.yaml b/config/rbac/service_account.yaml new file mode 100644 index 0000000..06eefef --- /dev/null +++ b/config/rbac/service_account.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/instance: controller-manager-sa + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubernetes-operator + app.kubernetes.io/part-of: kubernetes-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager + namespace: system diff --git a/config/samples/charts_v1alpha1_memgraphhighavailability.yaml b/config/samples/charts_v1alpha1_memgraphhighavailability.yaml new file mode 100644 index 0000000..430f390 --- /dev/null +++ b/config/samples/charts_v1alpha1_memgraphhighavailability.yaml @@ -0,0 +1,99 @@ +apiVersion: charts.my.domain/v1alpha1 +kind: MemgraphHighAvailability +metadata: + name: memgraphhighavailability-sample +spec: + # Default values copied from /helm-charts/memgraph-high-availability/values.yaml + coordinators: + - args: + - --experimental-enabled=high-availability + - --coordinator-id=1 + - --coordinator-port=12000 + - --bolt-port=7687 + - --also-log-to-stderr + - --log-level=TRACE + - --coordinator-hostname=memgraph-coordinator-1.default.svc.cluster.local + boltPort: 7687 + 
coordinatorPort: 12000 + id: "1" + - args: + - --experimental-enabled=high-availability + - --coordinator-id=2 + - --coordinator-port=12000 + - --bolt-port=7687 + - --also-log-to-stderr + - --log-level=TRACE + - --coordinator-hostname=memgraph-coordinator-2.default.svc.cluster.local + boltPort: 7687 + coordinatorPort: 12000 + id: "2" + - args: + - --experimental-enabled=high-availability + - --coordinator-id=3 + - --coordinator-port=12000 + - --bolt-port=7687 + - --also-log-to-stderr + - --log-level=TRACE + - --coordinator-hostname=memgraph-coordinator-3.default.svc.cluster.local + boltPort: 7687 + coordinatorPort: 12000 + id: "3" + data: + - args: + - --experimental-enabled=high-availability + - --management-port=10000 + - --bolt-port=7687 + - --also-log-to-stderr + - --log-level=TRACE + - --replication-restore-state-on-startup=true + boltPort: 7687 + id: "0" + managementPort: 10000 + replicationPort: 20000 + - args: + - --experimental-enabled=high-availability + - --management-port=10000 + - --bolt-port=7687 + - --also-log-to-stderr + - --log-level=TRACE + - --replication-restore-state-on-startup=true + boltPort: 7687 + id: "1" + managementPort: 10000 + replicationPort: 20000 + memgraph: + coordinators: + volumeClaim: + logPVC: false + logPVCClassName: "" + logPVCSize: 256Mi + storagePVC: true + storagePVCClassName: "" + storagePVCSize: 1Gi + data: + volumeClaim: + logPVC: false + logPVCClassName: "" + logPVCSize: 256Mi + storagePVC: true + storagePVCClassName: "" + storagePVCSize: 1Gi + env: + MEMGRAPH_ENTERPRISE_LICENSE: + MEMGRAPH_ORGANIZATION_NAME: + image: + pullPolicy: IfNotPresent + repository: memgraph/memgraph + tag: 2.18.0 + probes: + liveness: + initialDelaySeconds: 30 + periodSeconds: 10 + readiness: + initialDelaySeconds: 5 + periodSeconds: 5 + startup: + failureThreshold: 30 + periodSeconds: 10 + + diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml new file mode 100644 index 0000000..53a74af --- /dev/null +++ 
b/config/samples/kustomization.yaml @@ -0,0 +1,4 @@ +## Append samples of your project ## +resources: +- charts_v1alpha1_memgraphhighavailability.yaml +#+kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/scorecard/bases/config.yaml b/config/scorecard/bases/config.yaml new file mode 100644 index 0000000..c770478 --- /dev/null +++ b/config/scorecard/bases/config.yaml @@ -0,0 +1,7 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: [] diff --git a/config/scorecard/kustomization.yaml b/config/scorecard/kustomization.yaml new file mode 100644 index 0000000..50cd2d0 --- /dev/null +++ b/config/scorecard/kustomization.yaml @@ -0,0 +1,16 @@ +resources: +- bases/config.yaml +patchesJson6902: +- path: patches/basic.config.yaml + target: + group: scorecard.operatorframework.io + version: v1alpha3 + kind: Configuration + name: config +- path: patches/olm.config.yaml + target: + group: scorecard.operatorframework.io + version: v1alpha3 + kind: Configuration + name: config +#+kubebuilder:scaffold:patchesJson6902 diff --git a/config/scorecard/patches/basic.config.yaml b/config/scorecard/patches/basic.config.yaml new file mode 100644 index 0000000..893ebd2 --- /dev/null +++ b/config/scorecard/patches/basic.config.yaml @@ -0,0 +1,10 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.35.0 + labels: + suite: basic + test: basic-check-spec-test diff --git a/config/scorecard/patches/olm.config.yaml b/config/scorecard/patches/olm.config.yaml new file mode 100644 index 0000000..6cf777b --- /dev/null +++ b/config/scorecard/patches/olm.config.yaml @@ -0,0 +1,50 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.35.0 + labels: + suite: olm + test: olm-bundle-validation-test +- 
op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:v1.35.0 + labels: + suite: olm + test: olm-crds-have-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:v1.35.0 + labels: + suite: olm + test: olm-crds-have-resources-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.35.0 + labels: + suite: olm + test: olm-spec-descriptors-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:v1.35.0 + labels: + suite: olm + test: olm-status-descriptors-test diff --git a/helm-charts/memgraph-high-availability/.helmignore b/helm-charts/memgraph-high-availability/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/helm-charts/memgraph-high-availability/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/helm-charts/memgraph-high-availability/Chart.yaml b/helm-charts/memgraph-high-availability/Chart.yaml new file mode 100644 index 0000000..a15de30 --- /dev/null +++ b/helm-charts/memgraph-high-availability/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +appVersion: 2.18.0 +description: A Helm chart for Kubernetes with Memgraph High availability capabilities +home: https://memgraph.com/ +icon: https://public-assets.memgraph.com/memgraph-logo/logo-large.png +keywords: +- memgraph +- graph +- database +- cypher +- analytics +- high-availability +maintainers: +- email: tech@memgraph.com + name: Memgraph +name: memgraph-high-availability +sources: +- https://github.com/memgraph/memgraph +- https://github.com/memgraph/helm-charts +type: application +version: 0.1.1 diff --git a/helm-charts/memgraph-high-availability/README.md b/helm-charts/memgraph-high-availability/README.md new file mode 100644 index 0000000..7c07444 --- /dev/null +++ b/helm-charts/memgraph-high-availability/README.md @@ -0,0 +1,72 @@ +## Helm chart for Memgraph high availability cluster +A Helm Chart for deploying Memgraph in [high availability setup](https://memgraph.com/docs/clustering/high-availability). + +Memgraph HA cluster includes 3 coordinators, 2 data instances by default. The cluster setup is performed via the cluster-setup job. The HA cluster is still work in progress and started with "--experimental-enabled=high-availability". +The cluster is started in the configuration without the node selector, which means that in the current configuration, it is not highly available if the node fails.
+ +## Installing the Memgraph HA Helm Chart +To install the Memgraph HA Helm Chart, follow the steps below: +``` +helm install <release-name> memgraph/memgraph-high-availability --set memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE=<license>,memgraph.env.MEMGRAPH_ORGANIZATION_NAME=<organization-name> +``` +Replace `<release-name>` with a name of your choice for the release and set the enterprise license. + +## Changing the default chart values +To change the default chart values, run the command with the specified set of flags: +``` +helm install <release-name> memgraph/memgraph-high-availability --set <flag1>=<value1>,<flag2>=<value2>,... +``` +Or you can modify a `values.yaml` file and override the desired values: +``` +helm install <release-name> memgraph/memgraph-high-availability -f values.yaml +``` + + +## Configuration Options + +The following table lists the configurable parameters of the Memgraph chart and their default values. + +| Parameter | Description | Default | +|---------------------------------------------|-----------------------------------------------------------------------------------------------------|-----------------------------------------| +| `memgraph.image.repository` | Memgraph Docker image repository | `memgraph/memgraph` | +| `memgraph.image.tag` | Specific tag for the Memgraph Docker image. Overrides the image tag whose default is chart version.
| `2.17.0` | +| `memgraph.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE` | Memgraph enterprise license | `` | +| `memgraph.env.MEMGRAPH_ORGANIZATION_NAME` | Organization name | `` | +| `memgraph.probes.startup.failureThreshold` | Startup probe failure threshold | `30` | +| `memgraph.probes.startup.periodSeconds` | Startup probe period in seconds | `10` | +| `memgraph.probes.readiness.initialDelaySeconds` | Readiness probe initial delay in seconds | `5` | +| `memgraph.probes.readiness.periodSeconds` | Readiness probe period in seconds | `5` | +| `memgraph.probes.liveness.initialDelaySeconds` | Liveness probe initial delay in seconds | `30` | +| `memgraph.probes.liveness.periodSeconds` | Liveness probe period in seconds | `10` | +| `memgraph.data.volumeClaim.storagePVC` | Enable storage PVC | `true` | +| `memgraph.data.volumeClaim.storagePVCSize` | Size of the storage PVC | `1Gi` | +| `memgraph.data.volumeClaim.logPVC` | Enable log PVC | `false` | +| `memgraph.data.volumeClaim.logPVCSize` | Size of the log PVC | `256Mi` | +| `memgraph.coordinators.volumeClaim.storagePVC` | Enable storage PVC for coordinators | `true` | +| `memgraph.coordinators.volumeClaim.storagePVCSize` | Size of the storage PVC for coordinators | `1Gi` | +| `memgraph.coordinators.volumeClaim.logPVC` | Enable log PVC for coordinators | `false` | +| `memgraph.coordinators.volumeClaim.logPVCSize` | Size of the log PVC for coordinators | `256Mi` | +| `data` | Configuration for data instances | See `data` section | +| `coordinators` | Configuration for coordinator instances | See `coordinators` section | + +For the `data` and `coordinators` sections, each item in the list has the following parameters: + +| Parameter | Description | Default | +|---------------------------------------------|-----------------------------------------------------------------------------------------------------|-----------------------------------------| +| `id` | ID of 
the instance | `0` for data, `1` for coordinators | +| `boltPort` | Bolt port of the instance | `7687` | +| `managementPort` (data only) | Management port of the data instance | `10000` | +| `replicationPort` (data only) | Replication port of the data instance | `20000` | +| `coordinatorPort` (coordinators only) | Coordinator port of the coordinator instance | `12000` | +| `args` | List of arguments for the instance | See `args` section | + +The `args` section contains a list of arguments for the instance. The default values are the same for all instances: + +```markdown +- "--also-log-to-stderr" +- "--log-level=TRACE" +- "--replication-restore-state-on-startup=true" +``` + +For all available database settings, refer to the [Configuration settings reference guide](https://memgraph.com/docs/memgraph/reference-guide/configuration). diff --git a/helm-charts/memgraph-high-availability/templates/NOTES.txt b/helm-charts/memgraph-high-availability/templates/NOTES.txt new file mode 100644 index 0000000..5246881 --- /dev/null +++ b/helm-charts/memgraph-high-availability/templates/NOTES.txt @@ -0,0 +1,17 @@ +Thank you for installing the Memgraph High-availability cluster! 🎉 + +Memgraph HA cluster includes 3 coordinators, 2 data instances by default. The cluster setup is performed via the cluster-setup job. The HA cluster is still work in progress and started with "--experimental-enabled=high-availability". +The cluster is started in the configuration without the node selector, which means that in the current configuration, it is not highly available if the node fails. + +The cluster setup requires the proper enterprise license to work since HA is an enterprise feature. + +You can connect to Memgraph instances via Lab, mgconsole, or any other client. By default, all Memgraph instances (coordinators and data instances) listen on port 7687 for a bolt connection. +Make sure your are connecting to the correct ip address and port. 
For details check the configuration on your cloud provider(aws, gcp, azure, etc.) + +If you are connecting via mgconsole, you can use the following command: + +mgconsole --host --port + +If you are connecting via Lab, specify your instance IP address and port in Memgraph Lab GUI. + +If you are using minikube, you can find out your instance ip using `minikube ip`. diff --git a/helm-charts/memgraph-high-availability/templates/_helpers.tpl b/helm-charts/memgraph-high-availability/templates/_helpers.tpl new file mode 100644 index 0000000..c43a5d1 --- /dev/null +++ b/helm-charts/memgraph-high-availability/templates/_helpers.tpl @@ -0,0 +1,44 @@ + +{{/* Full name of the application */}} +{{- define "memgraph.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end -}} +{{- end -}} + + + +{{/* Define the chart version and app version */}} +{{- define "memgraph.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end -}} + + +{{/* Define the name of the application */}} +{{- define "memgraph.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end -}} + + +{{/* Common labels */}} +{{- define "memgraph.labels" -}} +app.kubernetes.io/name: {{ include "memgraph.name" . }} +helm.sh/chart: {{ include "memgraph.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + + +{{/* +Create the name of the service account to use +*/}} +{{- define "memgraph.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "memgraph.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/helm-charts/memgraph-high-availability/templates/cluster-setup.yaml b/helm-charts/memgraph-high-availability/templates/cluster-setup.yaml new file mode 100644 index 0000000..8376ccc --- /dev/null +++ b/helm-charts/memgraph-high-availability/templates/cluster-setup.yaml @@ -0,0 +1,51 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: memgraph-setup +spec: + template: + spec: + containers: + - name: memgraph-setup + image: memgraph/memgraph:2.17.0 + command: ["/bin/bash", "-c"] + args: + - | + # Install netcat + echo "Installing netcat..." + apt-get update && apt-get install -y netcat-openbsd + + # Wait until the pods are available + echo "Waiting for pods to become available for Bolt connection..." + until nc -z memgraph-coordinator-1.default.svc.cluster.local 7687; do sleep 1; done + until nc -z memgraph-coordinator-2.default.svc.cluster.local 7687; do sleep 1; done + until nc -z memgraph-coordinator-3.default.svc.cluster.local 7687; do sleep 1; done + until nc -z memgraph-data-0.default.svc.cluster.local 7687; do sleep 1; done + until nc -z memgraph-data-1.default.svc.cluster.local 7687; do sleep 1; done + echo "Pods are available for Bolt connection!" + + sleep 5 + + # Run the mgconsole commands + echo "Running mgconsole commands..." 
+ echo 'ADD COORDINATOR 2 WITH CONFIG {"bolt_server": "memgraph-coordinator-2.default.svc.cluster.local:7687", "coordinator_server": "memgraph-coordinator-2.default.svc.cluster.local:12000"};' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 + echo 'ADD COORDINATOR 3 WITH CONFIG {"bolt_server": "memgraph-coordinator-3.default.svc.cluster.local:7687", "coordinator_server": "memgraph-coordinator-3.default.svc.cluster.local:12000"};' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 + echo 'REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": "memgraph-data-0.default.svc.cluster.local:7687", "management_server": "memgraph-data-0.default.svc.cluster.local:10000", "replication_server": "memgraph-data-0.default.svc.cluster.local:20000"};' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 + echo 'REGISTER INSTANCE instance_2 WITH CONFIG {"bolt_server": "memgraph-data-1.default.svc.cluster.local:7687", "management_server": "memgraph-data-1.default.svc.cluster.local:10000", "replication_server": "memgraph-data-1.default.svc.cluster.local:20000"};' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 + echo 'SET INSTANCE instance_1 TO MAIN;' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 + sleep 3 + echo "SHOW INSTANCES on coord1" + echo 'SHOW INSTANCES;' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 + echo "SHOW INSTANCES on coord2" + echo 'SHOW INSTANCES;' | mgconsole --host memgraph-coordinator-2.default.svc.cluster.local --port 7687 + echo "SHOW INSTANCES on coord3" + echo 'SHOW INSTANCES;' | mgconsole --host memgraph-coordinator-3.default.svc.cluster.local --port 7687 + echo "RETURN 0 on 1st data instance" + echo 'RETURN 0;' | mgconsole --host memgraph-data-0.default.svc.cluster.local --port 7687 + echo "RETURN 0 on 2nd data instance" + echo 'RETURN 0;' | mgconsole --host 
memgraph-data-1.default.svc.cluster.local --port 7687 + securityContext: + runAsUser: 0 + + restartPolicy: Never + backoffLimit: 4 diff --git a/helm-charts/memgraph-high-availability/templates/coordinators.yaml b/helm-charts/memgraph-high-availability/templates/coordinators.yaml new file mode 100644 index 0000000..0dbf75e --- /dev/null +++ b/helm-charts/memgraph-high-availability/templates/coordinators.yaml @@ -0,0 +1,116 @@ +{{- range $index, $coordinator := .Values.coordinators }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memgraph-coordinator-{{ $coordinator.id }} +spec: + serviceName: "memgraph-coordinator-{{ $coordinator.id }}" + replicas: 1 + selector: + matchLabels: + app: memgraph-coordinator-{{ $coordinator.id }} + template: + metadata: + labels: + app: memgraph-coordinator-{{ $coordinator.id }} + spec: + initContainers: + - name: init + image: "{{ $.Values.memgraph.image.repository }}:{{ $.Values.memgraph.image.tag }}" + volumeMounts: + {{- if $.Values.memgraph.coordinators.volumeClaim.storagePVC }} + - name: memgraph-coordinator-{{ $coordinator.id }}-lib-storage + mountPath: /var/lib/memgraph + {{- end }} + {{- if $.Values.memgraph.coordinators.volumeClaim.logPVC }} + - name: memgraph-coordinator-{{ $coordinator.id }}-log-storage + mountPath: /var/log/memgraph + {{- end }} + command: [ "/bin/sh","-c" ] + args: [ "chown -R memgraph:memgraph /var/log; chown -R memgraph:memgraph /var/lib" ] + securityContext: + privileged: true + readOnlyRootFilesystem: false + capabilities: + drop: [ "all" ] + add: [ "CHOWN" ] + runAsUser: 0 + runAsNonRoot: false + containers: + - name: memgraph-coordinator + image: "{{ $.Values.memgraph.image.repository }}:{{ $.Values.memgraph.image.tag }}" + imagePullPolicy: {{ $.Values.memgraph.image.pullPolicy }} + ports: + - containerPort: {{ $coordinator.boltPort }} + - containerPort: {{ $coordinator.coordinatorPort }} + args: + {{- range $arg := $coordinator.args }} + - "{{ $arg }}" + {{- end }} + env: + - name: 
MEMGRAPH_ENTERPRISE_LICENSE + value: "{{ $.Values.memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE }}" + - name: MEMGRAPH_ORGANIZATION_NAME + value: "{{ $.Values.memgraph.env.MEMGRAPH_ORGANIZATION_NAME }}" + livenessProbe: + exec: + command: + - sh + - -c + - echo 'SHOW INSTANCES;' | mgconsole --host localhost --port {{ $coordinator.boltPort }} + initialDelaySeconds: {{ $.Values.memgraph.probes.liveness.initialDelaySeconds }} + periodSeconds: {{ $.Values.memgraph.probes.liveness.periodSeconds }} + readinessProbe: + exec: + command: + - sh + - -c + - echo 'SHOW INSTANCES;' | mgconsole --host localhost --port {{ $coordinator.boltPort }} + initialDelaySeconds: {{ $.Values.memgraph.probes.readiness.initialDelaySeconds }} + periodSeconds: {{ $.Values.memgraph.probes.readiness.periodSeconds }} + startupProbe: + exec: + command: + - sh + - -c + - echo 'SHOW INSTANCES;' | mgconsole --host localhost --port {{ $coordinator.boltPort }} + failureThreshold: {{ $.Values.memgraph.probes.startup.failureThreshold }} + periodSeconds: {{ $.Values.memgraph.probes.startup.periodSeconds }} + volumeMounts: + {{- if $.Values.memgraph.coordinators.volumeClaim.storagePVC }} + - name: memgraph-coordinator-{{ $coordinator.id }}-lib-storage + mountPath: /var/lib/memgraph + {{- end }} + {{- if $.Values.memgraph.coordinators.volumeClaim.logPVC}} + - name: memgraph-coordinator-{{ $coordinator.id }}-log-storage + mountPath: /var/log/memgraph + {{- end }} + volumeClaimTemplates: + {{- if $.Values.memgraph.coordinators.volumeClaim.storagePVC }} + - metadata: + name: memgraph-coordinator-{{ $coordinator.id }}-lib-storage + spec: + accessModes: + - "ReadWriteOnce" + {{- if $.Values.memgraph.coordinators.volumeClaim.storagePVCClassName }} + storageClassName: {{ $.Values.memgraph.coordinators.volumeClaim.storagePVCClassName }} + {{- end }} + resources: + requests: + storage: {{ $.Values.memgraph.coordinators.volumeClaim.storagePVCSize }} + {{- end }} + {{- if $.Values.memgraph.coordinators.volumeClaim.logPVC }} 
+ - metadata: + name: memgraph-coordinator-{{ $coordinator.id }}-log-storage + spec: + accessModes: + - "ReadWriteOnce" + {{- if $.Values.memgraph.coordinators.volumeClaim.logPVCClassName }} + storageClassName: {{ $.Values.memgraph.coordinators.volumeClaim.logPVCClassName }} + {{- end }} + resources: + requests: + storage: {{ $.Values.memgraph.coordinators.volumeClaim.logPVCSize }} + {{- end }} +--- +{{- end }} diff --git a/helm-charts/memgraph-high-availability/templates/data.yaml b/helm-charts/memgraph-high-availability/templates/data.yaml new file mode 100644 index 0000000..a3bbad3 --- /dev/null +++ b/helm-charts/memgraph-high-availability/templates/data.yaml @@ -0,0 +1,118 @@ +{{- range $index, $data := .Values.data }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memgraph-data-{{ $data.id }} +spec: + serviceName: "memgraph-data-{{ $data.id }}" + replicas: 1 + selector: + matchLabels: + app: memgraph-data-{{ $data.id }} + template: + metadata: + labels: + app: memgraph-data-{{ $data.id }} + spec: + initContainers: + - name: init + image: "{{ $.Values.memgraph.image.repository }}:{{ $.Values.memgraph.image.tag }}" + volumeMounts: + {{- if $.Values.memgraph.data.volumeClaim.storagePVC }} + - name: memgraph-data-{{ $data.id }}-lib-storage + mountPath: /var/lib/memgraph + {{- end }} + {{- if $.Values.memgraph.data.volumeClaim.logPVC }} + - name: memgraph-data-{{ $data.id }}-log-storage + mountPath: /var/log/memgraph + {{- end }} + command: [ "/bin/sh","-c" ] + args: [ "chown -R memgraph:memgraph /var/log; chown -R memgraph:memgraph /var/lib" ] + securityContext: + privileged: true + readOnlyRootFilesystem: false + capabilities: + drop: [ "all" ] + add: [ "CHOWN" ] + runAsUser: 0 + runAsNonRoot: false + containers: + - name: memgraph-data + image: "{{ $.Values.memgraph.image.repository }}:{{ $.Values.memgraph.image.tag }}" + imagePullPolicy: {{ $.Values.memgraph.image.pullPolicy }} + ports: + - containerPort: {{ $data.boltPort }} + - 
containerPort: {{ $data.managementPort }} + - containerPort: {{ $data.replicationPort }} + args: + {{- range $arg := $data.args }} + - "{{ $arg }}" + {{- end }} + env: + - name: MEMGRAPH_ENTERPRISE_LICENSE + value: "{{ $.Values.memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE }}" + - name: MEMGRAPH_ORGANIZATION_NAME + value: "{{ $.Values.memgraph.env.MEMGRAPH_ORGANIZATION_NAME }}" + livenessProbe: + exec: + command: + - sh + - -c + - echo 'RETURN 0;' | mgconsole --host localhost --port {{ $data.boltPort }} + initialDelaySeconds: {{ $.Values.memgraph.probes.liveness.initialDelaySeconds }} + periodSeconds: {{ $.Values.memgraph.probes.liveness.periodSeconds }} + readinessProbe: + exec: + command: + - sh + - -c + - echo 'RETURN 0;' | mgconsole --host localhost --port {{ $data.boltPort }} + initialDelaySeconds: {{ $.Values.memgraph.probes.readiness.initialDelaySeconds }} + periodSeconds: {{ $.Values.memgraph.probes.readiness.periodSeconds }} + startupProbe: + exec: + command: + - sh + - -c + - echo 'RETURN 0;' | mgconsole --host localhost --port {{ $data.boltPort }} + failureThreshold: {{ $.Values.memgraph.probes.startup.failureThreshold }} + periodSeconds: {{ $.Values.memgraph.probes.startup.periodSeconds }} + volumeMounts: + {{- if $.Values.memgraph.data.volumeClaim.storagePVC }} + - name: memgraph-data-{{ $data.id }}-lib-storage + mountPath: /var/lib/memgraph + {{- end }} + {{- if $.Values.memgraph.data.volumeClaim.logPVC}} + - name: memgraph-data-{{ $data.id }}-log-storage + mountPath: /var/log/memgraph + {{- end }} + volumeClaimTemplates: + {{- if $.Values.memgraph.data.volumeClaim.storagePVC }} + - metadata: + name: memgraph-data-{{ $data.id }}-lib-storage + spec: + accessModes: + - "ReadWriteOnce" + {{- if $.Values.memgraph.data.volumeClaim.storagePVCClassName }} + storageClassName: {{ $.Values.memgraph.data.volumeClaim.storagePVCClassName }} + {{- end }} + resources: + requests: + storage: {{ $.Values.memgraph.data.volumeClaim.storagePVCSize }} + {{- end }} + {{- if 
$.Values.memgraph.data.volumeClaim.logPVC }} + - metadata: + name: memgraph-data-{{ $data.id }}-log-storage + spec: + accessModes: + - "ReadWriteOnce" + {{- if $.Values.memgraph.data.volumeClaim.logPVCClassName }} + storageClassName: {{ $.Values.memgraph.data.volumeClaim.logPVCClassName }} + {{- end }} + resources: + requests: + storage: {{ $.Values.memgraph.data.volumeClaim.logPVCSize }} + {{- end }} + +--- +{{- end }} diff --git a/helm-charts/memgraph-high-availability/templates/services-coordinators.yaml b/helm-charts/memgraph-high-availability/templates/services-coordinators.yaml new file mode 100644 index 0000000..62742c9 --- /dev/null +++ b/helm-charts/memgraph-high-availability/templates/services-coordinators.yaml @@ -0,0 +1,39 @@ +# Service for coordinator instances internal +{{- range .Values.coordinators }} +--- +apiVersion: v1 +kind: Service +metadata: + name: memgraph-coordinator-{{ .id }} +spec: + type: ClusterIP + selector: + app: memgraph-coordinator-{{ .id }} + ports: + - protocol: TCP + name: bolt + port: {{ .boltPort }} + targetPort: {{ .boltPort }} + - protocol: TCP + name: coordinator + port: {{ .coordinatorPort }} + targetPort: {{ .coordinatorPort }} +{{- end }} + +# Service for coordinators instances external +{{- range .Values.coordinators }} +--- +apiVersion: v1 +kind: Service +metadata: + name: memgraph-coordinator-{{ .id }}-external +spec: + type: NodePort + selector: + app: memgraph-coordinator-{{ .id }} + ports: + - protocol: TCP + name: bolt + port: 7687 + targetPort: 7687 +{{- end }} diff --git a/helm-charts/memgraph-high-availability/templates/services-data.yaml b/helm-charts/memgraph-high-availability/templates/services-data.yaml new file mode 100644 index 0000000..30b5c22 --- /dev/null +++ b/helm-charts/memgraph-high-availability/templates/services-data.yaml @@ -0,0 +1,43 @@ +# Service for data instances internal +{{- range .Values.data }} +--- +apiVersion: v1 +kind: Service +metadata: + name: memgraph-data-{{ .id }} +spec: + type: 
ClusterIP + selector: + app: memgraph-data-{{ .id }} + ports: + - protocol: TCP + name: bolt + port: {{ .boltPort }} + targetPort: {{ .boltPort }} + - protocol: TCP + name: management + port: {{ .managementPort }} + targetPort: {{ .managementPort }} + - protocol: TCP + name: replication + port: {{ .replicationPort }} + targetPort: {{ .replicationPort }} +{{- end }} + +# Service for data instances external +{{- range .Values.data }} +--- +apiVersion: v1 +kind: Service +metadata: + name: memgraph-data-{{ .id }}-external +spec: + type: NodePort + selector: + app: memgraph-data-{{ .id }} + ports: + - protocol: TCP + name: bolt + port: {{ .boltPort }} + targetPort: {{ .boltPort }} +{{- end }} diff --git a/helm-charts/memgraph-high-availability/templates/tests/test-cluster-setup.yaml b/helm-charts/memgraph-high-availability/templates/tests/test-cluster-setup.yaml new file mode 100644 index 0000000..45b9927 --- /dev/null +++ b/helm-charts/memgraph-high-availability/templates/tests/test-cluster-setup.yaml @@ -0,0 +1,53 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: "{{ .Release.Name }}-memgraph-test-cluster-setup" + labels: + {{- include "memgraph.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + template: + metadata: + labels: + app: memgraph + spec: + containers: + - name: memgraph-test-cluster-setup + image: memgraph/memgraph:2.17.0 + command: ["/bin/bash", "-c"] + args: + - | + echo "Running cluster test setup..." 
+ result=$(echo 'SHOW INSTANCES;' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687) + echo "$result" + + # Use awk to check if all instances have health status "up" and there are exactly 5 instances + echo "$result" | awk ' + BEGIN { + FS = "|" + instance_count = 0 + health_ok = 1 + } + NR > 3 && NR <= 8 { + gsub(/^ *| *$/, "", $6) # Trim spaces from health + health = $6 + print "Health: [" health "]" # Debug print + instance_count++ + if (health != "up") { + health_ok = 0 + print "Non-up health found: [" health "]" # Debug print + } + } + END { + print "Instance count: " instance_count # Debug print + print "Health OK: " health_ok # Debug print + if (instance_count == 5 && health_ok == 1) { + print "All instances are up and there are exactly 5 instances." + } else { + print "Check failed. Either not all instances are up or the count is not 5." + } + } + ' + restartPolicy: Never + backoffLimit: 3 diff --git a/helm-charts/memgraph-high-availability/templates/tests/test-connection.yaml b/helm-charts/memgraph-high-availability/templates/tests/test-connection.yaml new file mode 100644 index 0000000..ef61c87 --- /dev/null +++ b/helm-charts/memgraph-high-availability/templates/tests/test-connection.yaml @@ -0,0 +1,29 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: "{{ .Release.Name }}-memgraph-test-connection" + labels: + {{- include "memgraph.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + template: + spec: + containers: + - name: memgraph-test + image: memgraph/memgraph:2.17.0 + command: ["/bin/sh", "-c"] + args: + - | + echo "Running connection test on data instances..." + echo "RETURN 0;" | mgconsole --host memgraph-data-0.default.svc.cluster.local --port 7687 + echo "RETURN 0;" | mgconsole --host memgraph-data-1.default.svc.cluster.local --port 7687 + + echo "Running a connection test on coordinator instances..." 
+ echo "SHOW INSTANCES;" | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 + echo "SHOW INSTANCES;" | mgconsole --host memgraph-coordinator-2.default.svc.cluster.local --port 7687 + echo "SHOW INSTANCES;" | mgconsole --host memgraph-coordinator-3.default.svc.cluster.local --port 7687 + + + restartPolicy: Never + backoffLimit: 3 diff --git a/helm-charts/memgraph-high-availability/values.yaml b/helm-charts/memgraph-high-availability/values.yaml new file mode 100644 index 0000000..fcbb06c --- /dev/null +++ b/helm-charts/memgraph-high-availability/values.yaml @@ -0,0 +1,100 @@ +# Default values for memgraph-high-availability. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates.| + +memgraph: + image: + repository: memgraph/memgraph + tag: 2.18.0 + pullPolicy: IfNotPresent + env: + MEMGRAPH_ENTERPRISE_LICENSE: "" + MEMGRAPH_ORGANIZATION_NAME: "" + probes: + startup: + failureThreshold: 30 + periodSeconds: 10 + readiness: + initialDelaySeconds: 5 + periodSeconds: 5 + liveness: + initialDelaySeconds: 30 + periodSeconds: 10 + data: + volumeClaim: + storagePVCClassName: "" + storagePVC: true + storagePVCSize: "1Gi" + logPVCClassName: "" + logPVC: false + logPVCSize: "256Mi" + coordinators: + volumeClaim: + storagePVCClassName: "" + storagePVC: true + storagePVCSize: "1Gi" + logPVCClassName: "" + logPVC: false + logPVCSize: "256Mi" + + + +data: +- id: "0" + boltPort: 7687 + managementPort: 10000 + replicationPort: 20000 + args: + - "--experimental-enabled=high-availability" + - "--management-port=10000" + - "--bolt-port=7687" + - "--also-log-to-stderr" + - "--log-level=TRACE" + - "--replication-restore-state-on-startup=true" + +- id: "1" + boltPort: 7687 + managementPort: 10000 + replicationPort: 20000 + args: + - "--experimental-enabled=high-availability" + - "--management-port=10000" + - "--bolt-port=7687" + - "--also-log-to-stderr" + - "--log-level=TRACE" + - "--replication-restore-state-on-startup=true" + 
+coordinators: +- id: "1" + boltPort: 7687 + coordinatorPort: 12000 + args: + - "--experimental-enabled=high-availability" + - "--coordinator-id=1" + - "--coordinator-port=12000" + - "--bolt-port=7687" + - "--also-log-to-stderr" + - "--log-level=TRACE" + - "--coordinator-hostname=memgraph-coordinator-1.default.svc.cluster.local" +- id: "2" + boltPort: 7687 + coordinatorPort: 12000 + args: + - "--experimental-enabled=high-availability" + - "--coordinator-id=2" + - "--coordinator-port=12000" + - "--bolt-port=7687" + - "--also-log-to-stderr" + - "--log-level=TRACE" + - "--coordinator-hostname=memgraph-coordinator-2.default.svc.cluster.local" +- id: "3" + boltPort: 7687 + coordinatorPort: 12000 + args: + - "--experimental-enabled=high-availability" + - "--coordinator-id=3" + - "--coordinator-port=12000" + - "--bolt-port=7687" + - "--also-log-to-stderr" + - "--log-level=TRACE" + - "--coordinator-hostname=memgraph-coordinator-3.default.svc.cluster.local" diff --git a/watches.yaml b/watches.yaml new file mode 100644 index 0000000..ee774bd --- /dev/null +++ b/watches.yaml @@ -0,0 +1,6 @@ +# Use the 'create api' subcommand to add watches to this file. 
+- group: charts.my.domain + version: v1alpha1 + kind: MemgraphHighAvailability + chart: helm-charts/memgraph-high-availability +#+kubebuilder:scaffold:watch From 27939de774c0db48320ce674fd1c03b497a96c7d Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Tue, 9 Jul 2024 12:17:24 +0200 Subject: [PATCH 02/39] Add helm-charts as submodule --- .gitmodules | 3 + README.md | 13 +- helm-charts | 1 + .../memgraph-high-availability/.helmignore | 23 ---- .../memgraph-high-availability/Chart.yaml | 21 ---- .../memgraph-high-availability/README.md | 72 ----------- .../templates/NOTES.txt | 17 --- .../templates/_helpers.tpl | 44 ------- .../templates/cluster-setup.yaml | 51 -------- .../templates/coordinators.yaml | 116 ----------------- .../templates/data.yaml | 118 ------------------ .../templates/services-coordinators.yaml | 39 ------ .../templates/services-data.yaml | 43 ------- .../templates/tests/test-cluster-setup.yaml | 53 -------- .../templates/tests/test-connection.yaml | 29 ----- .../memgraph-high-availability/values.yaml | 100 --------------- 16 files changed, 16 insertions(+), 727 deletions(-) create mode 100644 .gitmodules create mode 160000 helm-charts delete mode 100644 helm-charts/memgraph-high-availability/.helmignore delete mode 100644 helm-charts/memgraph-high-availability/Chart.yaml delete mode 100644 helm-charts/memgraph-high-availability/README.md delete mode 100644 helm-charts/memgraph-high-availability/templates/NOTES.txt delete mode 100644 helm-charts/memgraph-high-availability/templates/_helpers.tpl delete mode 100644 helm-charts/memgraph-high-availability/templates/cluster-setup.yaml delete mode 100644 helm-charts/memgraph-high-availability/templates/coordinators.yaml delete mode 100644 helm-charts/memgraph-high-availability/templates/data.yaml delete mode 100644 helm-charts/memgraph-high-availability/templates/services-coordinators.yaml delete mode 100644 helm-charts/memgraph-high-availability/templates/services-data.yaml delete mode 100644 
helm-charts/memgraph-high-availability/templates/tests/test-cluster-setup.yaml delete mode 100644 helm-charts/memgraph-high-availability/templates/tests/test-connection.yaml delete mode 100644 helm-charts/memgraph-high-availability/values.yaml diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..d561e32 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "helm-charts"] + path = helm-charts + url = https://github.com/memgraph/helm-charts.git diff --git a/README.md b/README.md index 55105f8..7f55641 100644 --- a/README.md +++ b/README.md @@ -1 +1,12 @@ -# kubernetes-operator \ No newline at end of file +# Memgraph Kubernetes Operator + +## Prerequisites + +We use Go version 1.22.5. Check out here how to [install Go](https://go.dev/doc/install). Helm version is v3.14.4. + + +## Installation + + + + diff --git a/helm-charts b/helm-charts new file mode 160000 index 0000000..dc0d1b2 --- /dev/null +++ b/helm-charts @@ -0,0 +1 @@ +Subproject commit dc0d1b25a43eec48dc973e850900c0532c36a1ef diff --git a/helm-charts/memgraph-high-availability/.helmignore b/helm-charts/memgraph-high-availability/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/helm-charts/memgraph-high-availability/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/helm-charts/memgraph-high-availability/Chart.yaml b/helm-charts/memgraph-high-availability/Chart.yaml deleted file mode 100644 index a15de30..0000000 --- a/helm-charts/memgraph-high-availability/Chart.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v2 -appVersion: 2.18.0 -description: A Helm chart for Kubernetes with Memgraph High availabiliy capabilites -home: https://memgraph.com/ -icon: https://public-assets.memgraph.com/memgraph-logo/logo-large.png -keywords: -- memgraph -- graph -- database -- cypher -- analytics -- high-availability -maintainers: -- email: tech@memgraph.com - name: Memgraph -name: memgraph-high-availability -sources: -- https://github.com/memgraph/memgraph -- https://github.com/memgraph/helm-charts -type: application -version: 0.1.1 diff --git a/helm-charts/memgraph-high-availability/README.md b/helm-charts/memgraph-high-availability/README.md deleted file mode 100644 index 7c07444..0000000 --- a/helm-charts/memgraph-high-availability/README.md +++ /dev/null @@ -1,72 +0,0 @@ -## Helm chart for Memgraph high availability cluster -A Helm Chart for deploying Memgraph in [high availability setup](https://memgraph.com/docs/clustering/high-availability). - -Memgraph HA cluster includes 3 coordinators, 2 data instances by default. The cluster setup is performed via the cluster-setup job. The HA cluster is still work in progress and started with "--experimental-enabled=high-availability". -The cluster is started in the configuration without the node selector, which means that in the current configuration, it is not highly available if the node fails. 
- -## Installing the Memgraph HA Helm Chart -To install the Memgraph HA Helm Chart, follow the steps below: -``` -helm install memgraph/memgraph-high-availability --set memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE=,memgraph.env.MEMGRAPH_ORGANIZATION_NAME= -``` -Replace `` with a name of your choice for the release and set the enterprise license. - -## Changing the default chart values -To change the default chart values, run the command with the specified set of flags: -``` -helm install memgraph/memgraph-high-availability --set =,=,... -``` -Or you can modify a `values.yaml` file and override the desired values: -``` -helm install memgraph/memgraph-high-availability-f values.yaml -``` - - -## Configuration Options - -The following table lists the configurable parameters of the Memgraph chart and their default values. - -| Parameter | Description | Default | -|---------------------------------------------|-----------------------------------------------------------------------------------------------------|-----------------------------------------| -| `memgraph.image.repository` | Memgraph Docker image repository | `memgraph/memgraph` | -| `memgraph.image.tag` | Specific tag for the Memgraph Docker image. Overrides the image tag whose default is chart version. 
| `2.17.0` | -| `memgraph.image.pullPolicy` | Image pull policy | `IfNotPresent` | -| `memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE` | Memgraph enterprise license | `` | -| `memgraph.env.MEMGRAPH_ORGANIZATION_NAME` | Organization name | `` | -| `memgraph.probes.startup.failureThreshold` | Startup probe failure threshold | `30` | -| `memgraph.probes.startup.periodSeconds` | Startup probe period in seconds | `10` | -| `memgraph.probes.readiness.initialDelaySeconds` | Readiness probe initial delay in seconds | `5` | -| `memgraph.probes.readiness.periodSeconds` | Readiness probe period in seconds | `5` | -| `memgraph.probes.liveness.initialDelaySeconds` | Liveness probe initial delay in seconds | `30` | -| `memgraph.probes.liveness.periodSeconds` | Liveness probe period in seconds | `10` | -| `memgraph.data.volumeClaim.storagePVC` | Enable storage PVC | `true` | -| `memgraph.data.volumeClaim.storagePVCSize` | Size of the storage PVC | `1Gi` | -| `memgraph.data.volumeClaim.logPVC` | Enable log PVC | `false` | -| `memgraph.data.volumeClaim.logPVCSize` | Size of the log PVC | `256Mi` | -| `memgraph.coordinators.volumeClaim.storagePVC` | Enable storage PVC for coordinators | `true` | -| `memgraph.coordinators.volumeClaim.storagePVCSize` | Size of the storage PVC for coordinators | `1Gi` | -| `memgraph.coordinators.volumeClaim.logPVC` | Enable log PVC for coordinators | `false` | -| `memgraph.coordinators.volumeClaim.logPVCSize` | Size of the log PVC for coordinators | `256Mi` | -| `data` | Configuration for data instances | See `data` section | -| `coordinators` | Configuration for coordinator instances | See `coordinators` section | - -For the `data` and `coordinators` sections, each item in the list has the following parameters: - -| Parameter | Description | Default | -|---------------------------------------------|-----------------------------------------------------------------------------------------------------|-----------------------------------------| -| `id` | ID of 
the instance | `0` for data, `1` for coordinators | -| `boltPort` | Bolt port of the instance | `7687` | -| `managementPort` (data only) | Management port of the data instance | `10000` | -| `replicationPort` (data only) | Replication port of the data instance | `20000` | -| `coordinatorPort` (coordinators only) | Coordinator port of the coordinator instance | `12000` | -| `args` | List of arguments for the instance | See `args` section | - -The `args` section contains a list of arguments for the instance. The default values are the same for all instances: - -```markdown -- "--also-log-to-stderr" -- "--log-level=TRACE" -- "--replication-restore-state-on-startup=true" -``` - -For all available database settings, refer to the [Configuration settings reference guide](https://memgraph.com/docs/memgraph/reference-guide/configuration). diff --git a/helm-charts/memgraph-high-availability/templates/NOTES.txt b/helm-charts/memgraph-high-availability/templates/NOTES.txt deleted file mode 100644 index 5246881..0000000 --- a/helm-charts/memgraph-high-availability/templates/NOTES.txt +++ /dev/null @@ -1,17 +0,0 @@ -Thank you for installing the Memgraph High-availability cluster! 🎉 - -Memgraph HA cluster includes 3 coordinators, 2 data instances by default. The cluster setup is performed via the cluster-setup job. The HA cluster is still work in progress and started with "--experimental-enabled=high-availability". -The cluster is started in the configuration without the node selector, which means that in the current configuration, it is not highly available if the node fails. - -The cluster setup requires the proper enterprise license to work since HA is an enterprise feature. - -You can connect to Memgraph instances via Lab, mgconsole, or any other client. By default, all Memgraph instances (coordinators and data instances) listen on port 7687 for a bolt connection. -Make sure your are connecting to the correct ip address and port. 
For details check the configuration on your cloud provider(aws, gcp, azure, etc.) - -If you are connecting via mgconsole, you can use the following command: - -mgconsole --host --port - -If you are connecting via Lab, specify your instance IP address and port in Memgraph Lab GUI. - -If you are using minikube, you can find out your instance ip using `minikube ip`. diff --git a/helm-charts/memgraph-high-availability/templates/_helpers.tpl b/helm-charts/memgraph-high-availability/templates/_helpers.tpl deleted file mode 100644 index c43a5d1..0000000 --- a/helm-charts/memgraph-high-availability/templates/_helpers.tpl +++ /dev/null @@ -1,44 +0,0 @@ - -{{/* Full name of the application */}} -{{- define "memgraph.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end -}} -{{- end -}} - - - -{{/* Define the chart version and app version */}} -{{- define "memgraph.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end -}} - - -{{/* Define the name of the application */}} -{{- define "memgraph.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end -}} - - -{{/* Common labels */}} -{{- define "memgraph.labels" -}} -app.kubernetes.io/name: {{ include "memgraph.name" . }} -helm.sh/chart: {{ include "memgraph.chart" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - - -{{/* -Create the name of the service account to use -*/}} -{{- define "memgraph.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "memgraph.fullname" .) 
.Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/helm-charts/memgraph-high-availability/templates/cluster-setup.yaml b/helm-charts/memgraph-high-availability/templates/cluster-setup.yaml deleted file mode 100644 index 8376ccc..0000000 --- a/helm-charts/memgraph-high-availability/templates/cluster-setup.yaml +++ /dev/null @@ -1,51 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: memgraph-setup -spec: - template: - spec: - containers: - - name: memgraph-setup - image: memgraph/memgraph:2.17.0 - command: ["/bin/bash", "-c"] - args: - - | - # Install netcat - echo "Installing netcat..." - apt-get update && apt-get install -y netcat-openbsd - - # Wait until the pods are available - echo "Waiting for pods to become available for Bolt connection..." - until nc -z memgraph-coordinator-1.default.svc.cluster.local 7687; do sleep 1; done - until nc -z memgraph-coordinator-2.default.svc.cluster.local 7687; do sleep 1; done - until nc -z memgraph-coordinator-3.default.svc.cluster.local 7687; do sleep 1; done - until nc -z memgraph-data-0.default.svc.cluster.local 7687; do sleep 1; done - until nc -z memgraph-data-1.default.svc.cluster.local 7687; do sleep 1; done - echo "Pods are available for Bolt connection!" - - sleep 5 - - # Run the mgconsole commands - echo "Running mgconsole commands..." 
- echo 'ADD COORDINATOR 2 WITH CONFIG {"bolt_server": "memgraph-coordinator-2.default.svc.cluster.local:7687", "coordinator_server": "memgraph-coordinator-2.default.svc.cluster.local:12000"};' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 - echo 'ADD COORDINATOR 3 WITH CONFIG {"bolt_server": "memgraph-coordinator-3.default.svc.cluster.local:7687", "coordinator_server": "memgraph-coordinator-3.default.svc.cluster.local:12000"};' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 - echo 'REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": "memgraph-data-0.default.svc.cluster.local:7687", "management_server": "memgraph-data-0.default.svc.cluster.local:10000", "replication_server": "memgraph-data-0.default.svc.cluster.local:20000"};' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 - echo 'REGISTER INSTANCE instance_2 WITH CONFIG {"bolt_server": "memgraph-data-1.default.svc.cluster.local:7687", "management_server": "memgraph-data-1.default.svc.cluster.local:10000", "replication_server": "memgraph-data-1.default.svc.cluster.local:20000"};' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 - echo 'SET INSTANCE instance_1 TO MAIN;' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 - sleep 3 - echo "SHOW INSTANCES on coord1" - echo 'SHOW INSTANCES;' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 - echo "SHOW INSTANCES on coord2" - echo 'SHOW INSTANCES;' | mgconsole --host memgraph-coordinator-2.default.svc.cluster.local --port 7687 - echo "SHOW INSTANCES on coord3" - echo 'SHOW INSTANCES;' | mgconsole --host memgraph-coordinator-3.default.svc.cluster.local --port 7687 - echo "RETURN 0 on 1st data instance" - echo 'RETURN 0;' | mgconsole --host memgraph-data-0.default.svc.cluster.local --port 7687 - echo "RETURN 0 on 2nd data instance" - echo 'RETURN 0;' | mgconsole --host 
memgraph-data-1.default.svc.cluster.local --port 7687 - securityContext: - runAsUser: 0 - - restartPolicy: Never - backoffLimit: 4 diff --git a/helm-charts/memgraph-high-availability/templates/coordinators.yaml b/helm-charts/memgraph-high-availability/templates/coordinators.yaml deleted file mode 100644 index 0dbf75e..0000000 --- a/helm-charts/memgraph-high-availability/templates/coordinators.yaml +++ /dev/null @@ -1,116 +0,0 @@ -{{- range $index, $coordinator := .Values.coordinators }} -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: memgraph-coordinator-{{ $coordinator.id }} -spec: - serviceName: "memgraph-coordinator-{{ $coordinator.id }}" - replicas: 1 - selector: - matchLabels: - app: memgraph-coordinator-{{ $coordinator.id }} - template: - metadata: - labels: - app: memgraph-coordinator-{{ $coordinator.id }} - spec: - initContainers: - - name: init - image: "{{ $.Values.memgraph.image.repository }}:{{ $.Values.memgraph.image.tag }}" - volumeMounts: - {{- if $.Values.memgraph.coordinators.volumeClaim.storagePVC }} - - name: memgraph-coordinator-{{ $coordinator.id }}-lib-storage - mountPath: /var/lib/memgraph - {{- end }} - {{- if $.Values.memgraph.coordinators.volumeClaim.logPVC }} - - name: memgraph-coordinator-{{ $coordinator.id }}-log-storage - mountPath: /var/log/memgraph - {{- end }} - command: [ "/bin/sh","-c" ] - args: [ "chown -R memgraph:memgraph /var/log; chown -R memgraph:memgraph /var/lib" ] - securityContext: - privileged: true - readOnlyRootFilesystem: false - capabilities: - drop: [ "all" ] - add: [ "CHOWN" ] - runAsUser: 0 - runAsNonRoot: false - containers: - - name: memgraph-coordinator - image: "{{ $.Values.memgraph.image.repository }}:{{ $.Values.memgraph.image.tag }}" - imagePullPolicy: {{ $.Values.memgraph.image.pullPolicy }} - ports: - - containerPort: {{ $coordinator.boltPort }} - - containerPort: {{ $coordinator.coordinatorPort }} - args: - {{- range $arg := $coordinator.args }} - - "{{ $arg }}" - {{- end }} - env: - - name: 
MEMGRAPH_ENTERPRISE_LICENSE - value: "{{ $.Values.memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE }}" - - name: MEMGRAPH_ORGANIZATION_NAME - value: "{{ $.Values.memgraph.env.MEMGRAPH_ORGANIZATION_NAME }}" - livenessProbe: - exec: - command: - - sh - - -c - - echo 'SHOW INSTANCES;' | mgconsole --host localhost --port {{ $coordinator.boltPort }} - initialDelaySeconds: {{ $.Values.memgraph.probes.liveness.initialDelaySeconds }} - periodSeconds: {{ $.Values.memgraph.probes.liveness.periodSeconds }} - readinessProbe: - exec: - command: - - sh - - -c - - echo 'SHOW INSTANCES;' | mgconsole --host localhost --port {{ $coordinator.boltPort }} - initialDelaySeconds: {{ $.Values.memgraph.probes.readiness.initialDelaySeconds }} - periodSeconds: {{ $.Values.memgraph.probes.readiness.periodSeconds }} - startupProbe: - exec: - command: - - sh - - -c - - echo 'SHOW INSTANCES;' | mgconsole --host localhost --port {{ $coordinator.boltPort }} - failureThreshold: {{ $.Values.memgraph.probes.startup.failureThreshold }} - periodSeconds: {{ $.Values.memgraph.probes.startup.periodSeconds }} - volumeMounts: - {{- if $.Values.memgraph.coordinators.volumeClaim.storagePVC }} - - name: memgraph-coordinator-{{ $coordinator.id }}-lib-storage - mountPath: /var/lib/memgraph - {{- end }} - {{- if $.Values.memgraph.coordinators.volumeClaim.logPVC}} - - name: memgraph-coordinator-{{ $coordinator.id }}-log-storage - mountPath: /var/log/memgraph - {{- end }} - volumeClaimTemplates: - {{- if $.Values.memgraph.coordinators.volumeClaim.storagePVC }} - - metadata: - name: memgraph-coordinator-{{ $coordinator.id }}-lib-storage - spec: - accessModes: - - "ReadWriteOnce" - {{- if $.Values.memgraph.coordinators.volumeClaim.storagePVCClassName }} - storageClassName: {{ $.Values.memgraph.coordinators.volumeClaim.storagePVCClassName }} - {{- end }} - resources: - requests: - storage: {{ $.Values.memgraph.coordinators.volumeClaim.storagePVCSize }} - {{- end }} - {{- if $.Values.memgraph.coordinators.volumeClaim.logPVC }} 
- - metadata: - name: memgraph-coordinator-{{ $coordinator.id }}-log-storage - spec: - accessModes: - - "ReadWriteOnce" - {{- if $.Values.memgraph.coordinators.volumeClaim.logPVCClassName }} - storageClassName: {{ $.Values.memgraph.coordinators.volumeClaim.logPVCClassName }} - {{- end }} - resources: - requests: - storage: {{ $.Values.memgraph.coordinators.volumeClaim.storagePVC.logPVCSize }} - {{- end }} ---- -{{- end }} diff --git a/helm-charts/memgraph-high-availability/templates/data.yaml b/helm-charts/memgraph-high-availability/templates/data.yaml deleted file mode 100644 index a3bbad3..0000000 --- a/helm-charts/memgraph-high-availability/templates/data.yaml +++ /dev/null @@ -1,118 +0,0 @@ -{{- range $index, $data := .Values.data }} -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: memgraph-data-{{ $data.id }} -spec: - serviceName: "memgraph-data-{{ $data.id }}" - replicas: 1 - selector: - matchLabels: - app: memgraph-data-{{ $data.id }} - template: - metadata: - labels: - app: memgraph-data-{{ $data.id }} - spec: - initContainers: - - name: init - image: "{{ $.Values.memgraph.image.repository }}:{{ $.Values.memgraph.image.tag }}" - volumeMounts: - {{- if $.Values.memgraph.data.volumeClaim.storagePVC }} - - name: memgraph-data-{{ $data.id }}-lib-storage - mountPath: /var/lib/memgraph - {{- end }} - {{- if $.Values.memgraph.data.volumeClaim.logPVC }} - - name: memgraph-data-{{ $data.id }}-log-storage - mountPath: /var/log/memgraph - {{- end }} - command: [ "/bin/sh","-c" ] - args: [ "chown -R memgraph:memgraph /var/log; chown -R memgraph:memgraph /var/lib" ] - securityContext: - privileged: true - readOnlyRootFilesystem: false - capabilities: - drop: [ "all" ] - add: [ "CHOWN" ] - runAsUser: 0 - runAsNonRoot: false - containers: - - name: memgraph-data - image: "{{ $.Values.memgraph.image.repository }}:{{ $.Values.memgraph.image.tag }}" - imagePullPolicy: {{ $.Values.memgraph.image.pullPolicy }} - ports: - - containerPort: {{ $data.boltPort }} - - 
containerPort: {{ $data.managementPort }} - - containerPort: {{ $data.replicationPort }} - args: - {{- range $arg := $data.args }} - - "{{ $arg }}" - {{- end }} - env: - - name: MEMGRAPH_ENTERPRISE_LICENSE - value: "{{ $.Values.memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE }}" - - name: MEMGRAPH_ORGANIZATION_NAME - value: "{{ $.Values.memgraph.env.MEMGRAPH_ORGANIZATION_NAME }}" - livenessProbe: - exec: - command: - - sh - - -c - - echo 'RETURN 0;' | mgconsole --host localhost --port {{ $data.boltPort }} - initialDelaySeconds: {{ $.Values.memgraph.probes.liveness.initialDelaySeconds }} - periodSeconds: {{ $.Values.memgraph.probes.liveness.periodSeconds }} - readinessProbe: - exec: - command: - - sh - - -c - - echo 'RETURN 0;' | mgconsole --host localhost --port {{ $data.boltPort }} - initialDelaySeconds: {{ $.Values.memgraph.probes.readiness.initialDelaySeconds }} - periodSeconds: {{ $.Values.memgraph.probes.readiness.periodSeconds }} - startupProbe: - exec: - command: - - sh - - -c - - echo 'RETURN 0;' | mgconsole --host localhost --port {{ $data.boltPort }} - failureThreshold: {{ $.Values.memgraph.probes.startup.failureThreshold }} - periodSeconds: {{ $.Values.memgraph.probes.startup.periodSeconds }} - volumeMounts: - {{- if $.Values.memgraph.data.volumeClaim.storagePVC }} - - name: memgraph-data-{{ $data.id }}-lib-storage - mountPath: /var/lib/memgraph - {{- end }} - {{- if $.Values.memgraph.data.volumeClaim.logPVC}} - - name: memgraph-data-{{ $data.id }}-log-storage - mountPath: /var/log/memgraph - {{- end }} - volumeClaimTemplates: - {{- if $.Values.memgraph.data.volumeClaim.storagePVC }} - - metadata: - name: memgraph-data-{{ $data.id }}-lib-storage - spec: - accessModes: - - "ReadWriteOnce" - {{- if $.Values.memgraph.data.volumeClaim.storagePVCClassName }} - storageClassName: {{ $.Values.memgraph.data.volumeClaim.storagePVCClassName }} - {{- end }} - resources: - requests: - storage: {{ $.Values.memgraph.data.volumeClaim.storagePVCSize }} - {{- end }} - {{- if 
$.Values.memgraph.data.volumeClaim.logPVC }} - - metadata: - name: memgraph-data-{{ $data.id }}-log-storage - spec: - accessModes: - - "ReadWriteOnce" - {{- if $.Values.memgraph.data.volumeClaim.logPVCClassName }} - storageClassName: {{ $.Values.memgraph.data.volumeClaim.logPVCClassName }} - {{- end }} - resources: - requests: - storage: {{ $.Values.memgraph.data.volumeClaim.logPVCSize }} - {{- end }} - ---- -{{- end }} diff --git a/helm-charts/memgraph-high-availability/templates/services-coordinators.yaml b/helm-charts/memgraph-high-availability/templates/services-coordinators.yaml deleted file mode 100644 index 62742c9..0000000 --- a/helm-charts/memgraph-high-availability/templates/services-coordinators.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# Service for coordinator instances internal -{{- range .Values.coordinators }} ---- -apiVersion: v1 -kind: Service -metadata: - name: memgraph-coordinator-{{ .id }} -spec: - type: ClusterIP - selector: - app: memgraph-coordinator-{{ .id }} - ports: - - protocol: TCP - name: bolt - port: {{ .boltPort }} - targetPort: {{ .boltPort }} - - protocol: TCP - name: coordinator - port: {{ .coordinatorPort }} - targetPort: {{ .coordinatorPort }} -{{- end }} - -# Service for coordinators instances external -{{- range .Values.coordinators }} ---- -apiVersion: v1 -kind: Service -metadata: - name: memgraph-coordinator-{{ .id }}-external -spec: - type: NodePort - selector: - app: memgraph-coordinator-{{ .id }} - ports: - - protocol: TCP - name: bolt - port: 7687 - targetPort: 7687 -{{- end }} diff --git a/helm-charts/memgraph-high-availability/templates/services-data.yaml b/helm-charts/memgraph-high-availability/templates/services-data.yaml deleted file mode 100644 index 30b5c22..0000000 --- a/helm-charts/memgraph-high-availability/templates/services-data.yaml +++ /dev/null @@ -1,43 +0,0 @@ -# Service for data instances internal -{{- range .Values.data }} ---- -apiVersion: v1 -kind: Service -metadata: - name: memgraph-data-{{ .id }} -spec: 
- type: ClusterIP - selector: - app: memgraph-data-{{ .id }} - ports: - - protocol: TCP - name: bolt - port: {{ .boltPort }} - targetPort: {{ .boltPort }} - - protocol: TCP - name: management - port: {{ .managementPort }} - targetPort: {{ .managementPort }} - - protocol: TCP - name: replication - port: {{ .replicationPort }} - targetPort: {{ .replicationPort }} -{{- end }} - -# Service for data instances external -{{- range .Values.data }} ---- -apiVersion: v1 -kind: Service -metadata: - name: memgraph-data-{{ .id }}-external -spec: - type: NodePort - selector: - app: memgraph-data-{{ .id }} - ports: - - protocol: TCP - name: bolt - port: {{ .boltPort }} - targetPort: {{ .boltPort }} -{{- end }} diff --git a/helm-charts/memgraph-high-availability/templates/tests/test-cluster-setup.yaml b/helm-charts/memgraph-high-availability/templates/tests/test-cluster-setup.yaml deleted file mode 100644 index 45b9927..0000000 --- a/helm-charts/memgraph-high-availability/templates/tests/test-cluster-setup.yaml +++ /dev/null @@ -1,53 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: "{{ .Release.Name }}-memgraph-test-cluster-setup" - labels: - {{- include "memgraph.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test -spec: - template: - metadata: - labels: - app: memgraph - spec: - containers: - - name: memgraph-test-cluster-setup - image: memgraph/memgraph:2.17.0 - command: ["/bin/bash", "-c"] - args: - - | - echo "Running cluster test setup..." 
- result=$(echo 'SHOW INSTANCES;' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687) - echo "$result" - - # Use awk to check if all instances have health status "up" and there are exactly 5 instances - echo "$result" | awk ' - BEGIN { - FS = "|" - instance_count = 0 - health_ok = 1 - } - NR > 3 && NR <= 8 { - gsub(/^ *| *$/, "", $6) # Trim spaces from health - health = $6 - print "Health: [" health "]" # Debug print - instance_count++ - if (health != "up") { - health_ok = 0 - print "Non-up health found: [" health "]" # Debug print - } - } - END { - print "Instance count: " instance_count # Debug print - print "Health OK: " health_ok # Debug print - if (instance_count == 5 && health_ok == 1) { - print "All instances are up and there are exactly 5 instances." - } else { - print "Check failed. Either not all instances are up or the count is not 5." - } - } - ' - restartPolicy: Never - backoffLimit: 3 diff --git a/helm-charts/memgraph-high-availability/templates/tests/test-connection.yaml b/helm-charts/memgraph-high-availability/templates/tests/test-connection.yaml deleted file mode 100644 index ef61c87..0000000 --- a/helm-charts/memgraph-high-availability/templates/tests/test-connection.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: "{{ .Release.Name }}-memgraph-test-connection" - labels: - {{- include "memgraph.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test -spec: - template: - spec: - containers: - - name: memgraph-test - image: memgraph/memgraph:2.17.0 - command: ["/bin/sh", "-c"] - args: - - | - echo "Running connection test on data instances..." - echo "RETURN 0;" | mgconsole --host memgraph-data-0.default.svc.cluster.local --port 7687 - echo "RETURN 0;" | mgconsole --host memgraph-data-1.default.svc.cluster.local --port 7687 - - echo "Running a connection test on coordinator instances..." 
- echo "SHOW INSTANCES;" | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 - echo "SHOW INSTANCES;" | mgconsole --host memgraph-coordinator-2.default.svc.cluster.local --port 7687 - echo "SHOW INSTANCES;" | mgconsole --host memgraph-coordinator-3.default.svc.cluster.local --port 7687 - - - restartPolicy: Never - backoffLimit: 3 diff --git a/helm-charts/memgraph-high-availability/values.yaml b/helm-charts/memgraph-high-availability/values.yaml deleted file mode 100644 index fcbb06c..0000000 --- a/helm-charts/memgraph-high-availability/values.yaml +++ /dev/null @@ -1,100 +0,0 @@ -# Default values for memgraph-high-availability. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates.| - -memgraph: - image: - repository: memgraph/memgraph - tag: 2.18.0 - pullPolicy: IfNotPresent - env: - MEMGRAPH_ENTERPRISE_LICENSE: "" - MEMGRAPH_ORGANIZATION_NAME: "" - probes: - startup: - failureThreshold: 30 - periodSeconds: 10 - readiness: - initialDelaySeconds: 5 - periodSeconds: 5 - liveness: - initialDelaySeconds: 30 - periodSeconds: 10 - data: - volumeClaim: - storagePVCClassName: "" - storagePVC: true - storagePVCSize: "1Gi" - logPVCClassName: "" - logPVC: false - logPVCSize: "256Mi" - coordinators: - volumeClaim: - storagePVCClassName: "" - storagePVC: true - storagePVCSize: "1Gi" - logPVCClassName: "" - logPVC: false - logPVCSize: "256Mi" - - - -data: -- id: "0" - boltPort: 7687 - managementPort: 10000 - replicationPort: 20000 - args: - - "--experimental-enabled=high-availability" - - "--management-port=10000" - - "--bolt-port=7687" - - "--also-log-to-stderr" - - "--log-level=TRACE" - - "--replication-restore-state-on-startup=true" - -- id: "1" - boltPort: 7687 - managementPort: 10000 - replicationPort: 20000 - args: - - "--experimental-enabled=high-availability" - - "--management-port=10000" - - "--bolt-port=7687" - - "--also-log-to-stderr" - - "--log-level=TRACE" - - 
"--replication-restore-state-on-startup=true" - -coordinators: -- id: "1" - boltPort: 7687 - coordinatorPort: 12000 - args: - - "--experimental-enabled=high-availability" - - "--coordinator-id=1" - - "--coordinator-port=12000" - - "--bolt-port=7687" - - "--also-log-to-stderr" - - "--log-level=TRACE" - - "--coordinator-hostname=memgraph-coordinator-1.default.svc.cluster.local" -- id: "2" - boltPort: 7687 - coordinatorPort: 12000 - args: - - "--experimental-enabled=high-availability" - - "--coordinator-id=2" - - "--coordinator-port=12000" - - "--bolt-port=7687" - - "--also-log-to-stderr" - - "--log-level=TRACE" - - "--coordinator-hostname=memgraph-coordinator-2.default.svc.cluster.local" -- id: "3" - boltPort: 7687 - coordinatorPort: 12000 - args: - - "--experimental-enabled=high-availability" - - "--coordinator-id=3" - - "--coordinator-port=12000" - - "--bolt-port=7687" - - "--also-log-to-stderr" - - "--log-level=TRACE" - - "--coordinator-hostname=memgraph-coordinator-3.default.svc.cluster.local" From 2cb6d88d49bb8be5933701942e2d48c71c2060ad Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Tue, 9 Jul 2024 12:48:26 +0200 Subject: [PATCH 03/39] Add partial instructions --- Makefile | 4 ++-- PROJECT | 10 ++++----- README.md | 19 ++++++++++++++++ ...ies.yaml => memgraph.com_memgraphhas.yaml} | 20 ++++++++--------- config/crd/kustomization.yaml | 2 +- config/manager/kustomization.yaml | 6 +++++ ..._role.yaml => memgraphha_editor_role.yaml} | 14 ++++++------ ..._role.yaml => memgraphha_viewer_role.yaml} | 14 ++++++------ config/rbac/role.yaml | 22 +++++++++---------- config/samples/kustomization.yaml | 2 +- ...ility.yaml => memgraph_v1_memgraphha.yaml} | 6 ++--- watches.yaml | 6 ++--- 12 files changed, 75 insertions(+), 50 deletions(-) rename config/crd/bases/{charts.my.domain_memgraphhighavailabilities.yaml => memgraph.com_memgraphhas.yaml} (78%) rename config/rbac/{memgraphhighavailability_editor_role.yaml => memgraphha_editor_role.yaml} (61%) rename 
config/rbac/{memgraphhighavailability_viewer_role.yaml => memgraphha_viewer_role.yaml} (59%) rename config/samples/{charts_v1alpha1_memgraphhighavailability.yaml => memgraph_v1_memgraphha.yaml} (95%) diff --git a/Makefile b/Makefile index dc86b78..de5c656 100644 --- a/Makefile +++ b/Makefile @@ -29,7 +29,7 @@ BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) # # For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both # my.domain/kubernetes-operator-bundle:$VERSION and my.domain/kubernetes-operator-catalog:$VERSION. -IMAGE_TAG_BASE ?= my.domain/kubernetes-operator +IMAGE_TAG_BASE ?= andidocker8888/kubernetes-operator # BUNDLE_IMG defines the image:tag used for the bundle. # You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=/:) @@ -51,7 +51,7 @@ endif OPERATOR_SDK_VERSION ?= v1.35.0 # Image URL to use all building/pushing image targets -IMG ?= controller:latest +IMG ?= $(IMAGE_TAG_BASE):$(VERSION) .PHONY: all all: docker-build diff --git a/PROJECT b/PROJECT index 3c5cdeb..ca9ee0b 100644 --- a/PROJECT +++ b/PROJECT @@ -2,7 +2,7 @@ # This file is used to track the info used to scaffold your project # and allow the plugins properly work. # More info: https://book.kubebuilder.io/reference/project-config.html -domain: my.domain +domain: com layout: - helm.sdk.operatorframework.io/v1 plugins: @@ -13,8 +13,8 @@ resources: - api: crdVersion: v1 namespaced: true - domain: my.domain - group: charts - kind: MemgraphHighAvailability - version: v1alpha1 + domain: com + group: memgraph + kind: MemgraphHA + version: v1 version: "3" diff --git a/README.md b/README.md index 7f55641..5bbdf30 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,25 @@ We use Go version 1.22.5. 
Check out here how to [install Go](https://go.dev/doc/ ## Installation +```bash +git clone git@github.com:memgraph/kubernetes-operator.git +git checkout helm-operator (until merged) +git submodule init +git submodule update +``` + +Change andidocker8888 to your domain name to download latest kubernetes operator. +(This is just for developers, users will be able to download this from our DockerHub.) +```bash +make docker-build docker-push +make deploy +``` + +After following steps above you should be able to get the following output: + + + + diff --git a/config/crd/bases/charts.my.domain_memgraphhighavailabilities.yaml b/config/crd/bases/memgraph.com_memgraphhas.yaml similarity index 78% rename from config/crd/bases/charts.my.domain_memgraphhighavailabilities.yaml rename to config/crd/bases/memgraph.com_memgraphhas.yaml index 3973c7d..91f4c22 100644 --- a/config/crd/bases/charts.my.domain_memgraphhighavailabilities.yaml +++ b/config/crd/bases/memgraph.com_memgraphhas.yaml @@ -2,20 +2,20 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: memgraphhighavailabilities.charts.my.domain + name: memgraphhas.memgraph.com spec: - group: charts.my.domain + group: memgraph.com names: - kind: MemgraphHighAvailability - listKind: MemgraphHighAvailabilityList - plural: memgraphhighavailabilities - singular: memgraphhighavailability + kind: MemgraphHA + listKind: MemgraphHAList + plural: memgraphhas + singular: memgraphha scope: Namespaced versions: - - name: v1alpha1 + - name: v1 schema: openAPIV3Schema: - description: MemgraphHighAvailability is the Schema for the memgraphhighavailabilities API + description: MemgraphHA is the Schema for the memgraphhas API properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -30,11 +30,11 @@ spec: metadata: type: object spec: - description: Spec defines the desired state of MemgraphHighAvailability + description: Spec defines the desired state of MemgraphHA 
type: object x-kubernetes-preserve-unknown-fields: true status: - description: Status defines the observed state of MemgraphHighAvailability + description: Status defines the observed state of MemgraphHA type: object x-kubernetes-preserve-unknown-fields: true type: object diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index bb22f5e..a7ddb23 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -2,5 +2,5 @@ # since it depends on service name and namespace that are out of this kustomize package. # It should be run by config/default resources: -- bases/charts.my.domain_memgraphhighavailabilities.yaml +- bases/memgraph.com_memgraphhas.yaml #+kubebuilder:scaffold:crdkustomizeresource diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 5c5f0b8..e6fa018 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -1,2 +1,8 @@ resources: - manager.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: controller + newName: andidocker8888/kubernetes-operator + newTag: 0.0.1 diff --git a/config/rbac/memgraphhighavailability_editor_role.yaml b/config/rbac/memgraphha_editor_role.yaml similarity index 61% rename from config/rbac/memgraphhighavailability_editor_role.yaml rename to config/rbac/memgraphha_editor_role.yaml index 06dd3a6..e54bf20 100644 --- a/config/rbac/memgraphhighavailability_editor_role.yaml +++ b/config/rbac/memgraphha_editor_role.yaml @@ -1,20 +1,20 @@ -# permissions for end users to edit memgraphhighavailabilities. +# permissions for end users to edit memgraphhas. 
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: memgraphhighavailability-editor-role + app.kubernetes.io/instance: memgraphha-editor-role app.kubernetes.io/component: rbac app.kubernetes.io/created-by: kubernetes-operator app.kubernetes.io/part-of: kubernetes-operator app.kubernetes.io/managed-by: kustomize - name: memgraphhighavailability-editor-role + name: memgraphha-editor-role rules: - apiGroups: - - charts.my.domain + - memgraph.com resources: - - memgraphhighavailabilities + - memgraphhas verbs: - create - delete @@ -24,8 +24,8 @@ rules: - update - watch - apiGroups: - - charts.my.domain + - memgraph.com resources: - - memgraphhighavailabilities/status + - memgraphhas/status verbs: - get diff --git a/config/rbac/memgraphhighavailability_viewer_role.yaml b/config/rbac/memgraphha_viewer_role.yaml similarity index 59% rename from config/rbac/memgraphhighavailability_viewer_role.yaml rename to config/rbac/memgraphha_viewer_role.yaml index e8f8a07..769adfd 100644 --- a/config/rbac/memgraphhighavailability_viewer_role.yaml +++ b/config/rbac/memgraphha_viewer_role.yaml @@ -1,27 +1,27 @@ -# permissions for end users to view memgraphhighavailabilities. +# permissions for end users to view memgraphhas. 
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: memgraphhighavailability-viewer-role + app.kubernetes.io/instance: memgraphha-viewer-role app.kubernetes.io/component: rbac app.kubernetes.io/created-by: kubernetes-operator app.kubernetes.io/part-of: kubernetes-operator app.kubernetes.io/managed-by: kustomize - name: memgraphhighavailability-viewer-role + name: memgraphha-viewer-role rules: - apiGroups: - - charts.my.domain + - memgraph.com resources: - - memgraphhighavailabilities + - memgraphhas verbs: - get - list - watch - apiGroups: - - charts.my.domain + - memgraph.com resources: - - memgraphhighavailabilities/status + - memgraphhas/status verbs: - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index f94366c..94d9bfa 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -29,14 +29,14 @@ rules: - create ## -## Rules for charts.my.domain/v1alpha1, Kind: MemgraphHighAvailability +## Rules for memgraph.com/v1, Kind: MemgraphHA ## - apiGroups: - - charts.my.domain + - memgraph.com resources: - - memgraphhighavailabilities - - memgraphhighavailabilities/status - - memgraphhighavailabilities/finalizers + - memgraphhas + - memgraphhas/status + - memgraphhas/finalizers verbs: - create - delete @@ -48,20 +48,20 @@ rules: - verbs: - "*" apiGroups: - - "batch" + - "apps" resources: - - "jobs" + - "statefulsets" - verbs: - "*" apiGroups: - - "" + - "batch" resources: - - "services" + - "jobs" - verbs: - "*" apiGroups: - - "apps" + - "" resources: - - "statefulsets" + - "services" #+kubebuilder:scaffold:rules diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 53a74af..9361e5a 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -1,4 +1,4 @@ ## Append samples of your project ## resources: -- charts_v1alpha1_memgraphhighavailability.yaml +- memgraph_v1_memgraphha.yaml 
#+kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/charts_v1alpha1_memgraphhighavailability.yaml b/config/samples/memgraph_v1_memgraphha.yaml similarity index 95% rename from config/samples/charts_v1alpha1_memgraphhighavailability.yaml rename to config/samples/memgraph_v1_memgraphha.yaml index 430f390..56da207 100644 --- a/config/samples/charts_v1alpha1_memgraphhighavailability.yaml +++ b/config/samples/memgraph_v1_memgraphha.yaml @@ -1,7 +1,7 @@ -apiVersion: charts.my.domain/v1alpha1 -kind: MemgraphHighAvailability +apiVersion: memgraph.com/v1 +kind: MemgraphHA metadata: - name: memgraphhighavailability-sample + name: memgraphha-sample spec: # Default values copied from /helm-charts/memgraph-high-availability/values.yaml coordinators: diff --git a/watches.yaml b/watches.yaml index ee774bd..d6eecb9 100644 --- a/watches.yaml +++ b/watches.yaml @@ -1,6 +1,6 @@ # Use the 'create api' subcommand to add watches to this file. -- group: charts.my.domain - version: v1alpha1 - kind: MemgraphHighAvailability +- group: memgraph.com + version: v1 + kind: MemgraphHA chart: helm-charts/memgraph-high-availability #+kubebuilder:scaffold:watch From 5d22d63cf88b161eb1600f4367998b35c104fffe Mon Sep 17 00:00:00 2001 From: Andi Date: Tue, 9 Jul 2024 12:49:49 +0200 Subject: [PATCH 04/39] Update README.md --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 5bbdf30..6a4752c 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,8 @@ make docker-build docker-push make deploy ``` -After following steps above you should be able to get the following output: +After following steps above you should be able to see `kubernetes-operator-controller-manager` in `kubernetes-operator-system` namespace: +![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/a4fc70fe-ef5b-4541-afd8-3ad3ee43a070) From d2b3cab20b66904719969c07c3c60571602a2097 Mon Sep 17 00:00:00 2001 From: Andi Date: Tue, 9 Jul 2024 
12:57:42 +0200 Subject: [PATCH 05/39] Update README.md --- README.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 6a4752c..d378ed6 100644 --- a/README.md +++ b/README.md @@ -21,12 +21,17 @@ make docker-build docker-push make deploy ``` -After following steps above you should be able to see `kubernetes-operator-controller-manager` in `kubernetes-operator-system` namespace: +After following steps above you should be able to see `kubernetes-operator-controller-manager` deployment in `kubernetes-operator-system` namespace: ![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/a4fc70fe-ef5b-4541-afd8-3ad3ee43a070) +This also causes creating `kubernetes-operator-controller-manager-768d9db99b-xs6hk` pod with 2 containers in `kubernetes-operator-system` namespace: +![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/7220c1bd-588c-4662-b696-d43b3085eac3) +If you position yourself into operator-controller-manager pod with: +`kubectl exec -it -n kubernetes-operator-system kubernetes-operator-controller-manager-768d9db99b-xs6hk bash` and run ls, you should be able to see two items: `helm-charts` directory and watches.yaml. 
+ From e36a5e14960bd45f80e2f1cb0c248993bdc0517c Mon Sep 17 00:00:00 2001 From: Andi Date: Tue, 9 Jul 2024 13:12:01 +0200 Subject: [PATCH 06/39] Update README.md --- README.md | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index d378ed6..6a35e6c 100644 --- a/README.md +++ b/README.md @@ -28,10 +28,23 @@ This also causes creating `kubernetes-operator-controller-manager-768d9db99b-xs6 ![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/7220c1bd-588c-4662-b696-d43b3085eac3) - If you position yourself into operator-controller-manager pod with: `kubectl exec -it -n kubernetes-operator-system kubernetes-operator-controller-manager-768d9db99b-xs6hk bash` and run ls, you should be able to see two items: `helm-charts` directory and watches.yaml. +Go to `config/samples/memgraph_v1_memghraphha.yaml` and provide your license information by setting `MEMGRAPH_ORGANIZATION_NAME` and `MEMGRAPH_ENTERPRISE_LICENSE`. + +After approx. 
60s, you should be able to see your cluster running with `kubectl get pods -A`: +![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/069e2079-03f2-4827-83c1-b06a338b63e4) + +Find URL of any coordinator instances: +![Screenshot from 2024-07-09 13-10-42](https://github.com/memgraph/kubernetes-operator/assets/53269502/fbc2d487-e258-4613-bc85-0484bcf2e0dd) + +and connect to see the state of the cluster: +![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/c68d52e2-19f7-4e45-8ff0-fc2ee662c64b) + + + + From b14ab60e10f30e6ca01fe8105fe856d81dd39651 Mon Sep 17 00:00:00 2001 From: Andi Date: Tue, 9 Jul 2024 14:32:11 +0200 Subject: [PATCH 07/39] Update README.md --- README.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/README.md b/README.md index 6a35e6c..290aa41 100644 --- a/README.md +++ b/README.md @@ -42,6 +42,19 @@ Find URL of any coordinator instances: and connect to see the state of the cluster: ![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/c68d52e2-19f7-4e45-8ff0-fc2ee662c64b) +Let's say I want to change `--log-level` flag for coordinators with id 1 and 2. I can do that in the following way: +![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/87ff43e7-f5b1-4764-9fed-4d87758b0f77) + +Only pods corresponding to these 2 coordinators will get restarted: +![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/930ac553-31ad-4230-9e2e-f67858f3fe25) + + + + + + + + From 9793a39aa2f1e6ce95e3565c23f39567a818bc16 Mon Sep 17 00:00:00 2001 From: Andi Date: Tue, 9 Jul 2024 14:34:40 +0200 Subject: [PATCH 08/39] Update README.md --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 290aa41..e64d1af 100644 --- a/README.md +++ b/README.md @@ -48,6 +48,13 @@ Let's say I want to change `--log-level` flag for coordinators with id 1 and 2. 
Only pods corresponding to these 2 coordinators will get restarted: ![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/930ac553-31ad-4230-9e2e-f67858f3fe25) +## Clear resources + +``` +kubectl delete -f config/samples/memgraph_v1_memgraphha.yaml +make undeploy +``` + From 363051136f08e74a52e8341cad68c6e1ac3a802f Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Wed, 10 Jul 2024 09:14:09 +0200 Subject: [PATCH 09/39] Add helmify target --- Makefile | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/Makefile b/Makefile index de5c656..a95961d 100644 --- a/Makefile +++ b/Makefile @@ -225,3 +225,14 @@ catalog-build: opm ## Build a catalog image. .PHONY: catalog-push catalog-push: ## Push a catalog image. $(MAKE) docker-push IMG=$(CATALOG_IMG) + +HELMIFY ?= helmify + +.PHONY: helmify +helmify: $(HELMIFY) ## Download helmify locally if necessary. +$(HELMIFY): $(LOCALBIN) + test -s $(LOCALBIN)/helmify || GOBIN=$(LOCALBIN) go install github.com/arttor/helmify/cmd/helmify@latest + +helm: config/manifests kustomize helmify + $(KUSTOMIZE) build config/default | $(HELMIFY) + From 99966fc2dc46cb54a7b0c983fde65c8c5f7be075 Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Wed, 10 Jul 2024 10:54:16 +0200 Subject: [PATCH 10/39] e2e setup --- Makefile | 4 ++-- README.md | 7 +------ config/manager/kustomization.yaml | 4 ++-- config/samples/memgraph_v1_memgraphha.yaml | 4 ++-- watches.yaml | 2 +- 5 files changed, 8 insertions(+), 13 deletions(-) diff --git a/Makefile b/Makefile index a95961d..83ed94a 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ # To re-generate a bundle for another specific version without changing the standard setup, you can: # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) -VERSION ?= 0.0.1 +VERSION ?= 0.0.2 # CHANNELS define the bundle channels used in the bundle. 
# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable") @@ -29,7 +29,7 @@ BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) # # For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both # my.domain/kubernetes-operator-bundle:$VERSION and my.domain/kubernetes-operator-catalog:$VERSION. -IMAGE_TAG_BASE ?= andidocker8888/kubernetes-operator +IMAGE_TAG_BASE ?= memgraph/kubernetes-operator # BUNDLE_IMG defines the image:tag used for the bundle. # You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=/:) diff --git a/README.md b/README.md index e64d1af..fd3adff 100644 --- a/README.md +++ b/README.md @@ -12,12 +12,6 @@ git clone git@github.com:memgraph/kubernetes-operator.git git checkout helm-operator (until merged) git submodule init git submodule update -``` - -Change andidocker8888 to your domain name to download latest kubernetes operator. -(This is just for developers, users will be able to download this from our DockerHub.) -```bash -make docker-build docker-push make deploy ``` @@ -32,6 +26,7 @@ If you position yourself into operator-controller-manager pod with: `kubectl exec -it -n kubernetes-operator-system kubernetes-operator-controller-manager-768d9db99b-xs6hk bash` and run ls, you should be able to see two items: `helm-charts` directory and watches.yaml. Go to `config/samples/memgraph_v1_memghraphha.yaml` and provide your license information by setting `MEMGRAPH_ORGANIZATION_NAME` and `MEMGRAPH_ENTERPRISE_LICENSE`. +Start Memgraph HA cluster with `kubectl apply -f config/samples/memgraph_v1_memgraphha.yaml`. After approx. 
60s, you should be able to see your cluster running with `kubectl get pods -A`: ![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/069e2079-03f2-4827-83c1-b06a338b63e4) diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index e6fa018..c3e4459 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -4,5 +4,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: controller - newName: andidocker8888/kubernetes-operator - newTag: 0.0.1 + newName: memgraph/kubernetes-operator + newTag: 0.0.2 diff --git a/config/samples/memgraph_v1_memgraphha.yaml b/config/samples/memgraph_v1_memgraphha.yaml index 56da207..802b8ec 100644 --- a/config/samples/memgraph_v1_memgraphha.yaml +++ b/config/samples/memgraph_v1_memgraphha.yaml @@ -79,8 +79,8 @@ spec: storagePVCClassName: "" storagePVCSize: 1Gi env: - MEMGRAPH_ENTERPRISE_LICENSE: - MEMGRAPH_ORGANIZATION_NAME: + MEMGRAPH_ENTERPRISE_LICENSE: "" + MEMGRAPH_ORGANIZATION_NAME: "" image: pullPolicy: IfNotPresent repository: memgraph/memgraph diff --git a/watches.yaml b/watches.yaml index d6eecb9..efed17c 100644 --- a/watches.yaml +++ b/watches.yaml @@ -2,5 +2,5 @@ - group: memgraph.com version: v1 kind: MemgraphHA - chart: helm-charts/memgraph-high-availability + chart: helm-charts/charts/memgraph-high-availability #+kubebuilder:scaffold:watch From abf4f591756116fc4edb93b04bd647c867710a42 Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Wed, 10 Jul 2024 11:12:20 +0200 Subject: [PATCH 11/39] UX friendly name --- README.md | 4 ++-- config/samples/kustomization.yaml | 2 +- .../{memgraph_v1_memgraphha.yaml => memgraph_v1_ha.yaml} | 0 watches.yaml | 8 ++++---- 4 files changed, 7 insertions(+), 7 deletions(-) rename config/samples/{memgraph_v1_memgraphha.yaml => memgraph_v1_ha.yaml} (100%) diff --git a/README.md b/README.md index fd3adff..e95bae2 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ If you position yourself 
into operator-controller-manager pod with: `kubectl exec -it -n kubernetes-operator-system kubernetes-operator-controller-manager-768d9db99b-xs6hk bash` and run ls, you should be able to see two items: `helm-charts` directory and watches.yaml. Go to `config/samples/memgraph_v1_memghraphha.yaml` and provide your license information by setting `MEMGRAPH_ORGANIZATION_NAME` and `MEMGRAPH_ENTERPRISE_LICENSE`. -Start Memgraph HA cluster with `kubectl apply -f config/samples/memgraph_v1_memgraphha.yaml`. +Start Memgraph HA cluster with `kubectl apply -f config/samples/memgraph_v1_ha.yaml`. After approx. 60s, you should be able to see your cluster running with `kubectl get pods -A`: ![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/069e2079-03f2-4827-83c1-b06a338b63e4) @@ -46,7 +46,7 @@ Only pods corresponding to these 2 coordinators will get restarted: ## Clear resources ``` -kubectl delete -f config/samples/memgraph_v1_memgraphha.yaml +kubectl delete -f config/samples/memgraph_v1_ha.yaml make undeploy ``` diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 9361e5a..a483d76 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -1,4 +1,4 @@ ## Append samples of your project ## resources: -- memgraph_v1_memgraphha.yaml +- memgraph_v1_ha.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/memgraph_v1_memgraphha.yaml b/config/samples/memgraph_v1_ha.yaml similarity index 100% rename from config/samples/memgraph_v1_memgraphha.yaml rename to config/samples/memgraph_v1_ha.yaml diff --git a/watches.yaml b/watches.yaml index efed17c..3078f4f 100644 --- a/watches.yaml +++ b/watches.yaml @@ -1,6 +1,6 @@ # Use the 'create api' subcommand to add watches to this file. 
-- group: memgraph.com - version: v1 - kind: MemgraphHA - chart: helm-charts/charts/memgraph-high-availability +- group: memgraph.com # The group of the Custom Resource that will be watched + version: v1 # The version of the Custom Resource that will be watched + kind: MemgraphHA # The kind of the Custom Resource that will be watched + chart: helm-charts/charts/memgraph-high-availability # Path to the chart that will be used when reconciling #+kubebuilder:scaffold:watch From b22809ebf6e879047ab0083711fed80a8bd56700 Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Wed, 10 Jul 2024 11:32:34 +0200 Subject: [PATCH 12/39] Docs --- README.md | 66 ++++++++------------------------------------ docs/installation.md | 44 +++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 55 deletions(-) create mode 100644 docs/installation.md diff --git a/README.md b/README.md index e95bae2..6b3fb6c 100644 --- a/README.md +++ b/README.md @@ -1,65 +1,21 @@ # Memgraph Kubernetes Operator -## Prerequisites - -We use Go version 1.22.5. Check out here how to [install Go](https://go.dev/doc/install). Helm version is v3.14.4. 
- - -## Installation - -```bash -git clone git@github.com:memgraph/kubernetes-operator.git -git checkout helm-operator (until merged) -git submodule init -git submodule update -make deploy -``` - -After following steps above you should be able to see `kubernetes-operator-controller-manager` deployment in `kubernetes-operator-system` namespace: -![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/a4fc70fe-ef5b-4541-afd8-3ad3ee43a070) - -This also causes creating `kubernetes-operator-controller-manager-768d9db99b-xs6hk` pod with 2 containers in `kubernetes-operator-system` namespace: -![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/7220c1bd-588c-4662-b696-d43b3085eac3) - - -If you position yourself into operator-controller-manager pod with: -`kubectl exec -it -n kubernetes-operator-system kubernetes-operator-controller-manager-768d9db99b-xs6hk bash` and run ls, you should be able to see two items: `helm-charts` directory and watches.yaml. - -Go to `config/samples/memgraph_v1_memghraphha.yaml` and provide your license information by setting `MEMGRAPH_ORGANIZATION_NAME` and `MEMGRAPH_ENTERPRISE_LICENSE`. -Start Memgraph HA cluster with `kubectl apply -f config/samples/memgraph_v1_ha.yaml`. - -After approx. 60s, you should be able to see your cluster running with `kubectl get pods -A`: -![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/069e2079-03f2-4827-83c1-b06a338b63e4) - -Find URL of any coordinator instances: -![Screenshot from 2024-07-09 13-10-42](https://github.com/memgraph/kubernetes-operator/assets/53269502/fbc2d487-e258-4613-bc85-0484bcf2e0dd) - -and connect to see the state of the cluster: -![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/c68d52e2-19f7-4e45-8ff0-fc2ee662c64b) - -Let's say I want to change `--log-level` flag for coordinators with id 1 and 2. 
I can do that in the following way: -![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/87ff43e7-f5b1-4764-9fed-4d87758b0f77) - -Only pods corresponding to these 2 coordinators will get restarted: -![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/930ac553-31ad-4230-9e2e-f67858f3fe25) - -## Clear resources - -``` -kubectl delete -f config/samples/memgraph_v1_ha.yaml -make undeploy -``` - - - - - - +## Table of Contents +- [Prerequisites](#prerequisites) +- [Documentation](#documentation) +- [License](#license) +## Prerequisites +We use Go version 1.22.5. Check out here how to [install Go](https://go.dev/doc/install). Helm version is v3.14.4. +## Documentation +Check our [Documentation](/docs) to start using our Kubernetes operator. +1. [Install the Memgraph Kubernetes Operator](docs/installation.md) +## License +Please check the [LICENSE]() file diff --git a/docs/installation.md b/docs/installation.md new file mode 100644 index 0000000..2c24b20 --- /dev/null +++ b/docs/installation.md @@ -0,0 +1,44 @@ +# Install Memgraph Kubernetes Operator + + +```bash +git clone git@github.com:memgraph/kubernetes-operator.git +git submodule init +git submodule update +make deploy +``` + +After following steps above you should be able to see `kubernetes-operator-controller-manager` deployment in `kubernetes-operator-system` namespace: +![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/a4fc70fe-ef5b-4541-afd8-3ad3ee43a070) + +This also causes creating `kubernetes-operator-controller-manager-768d9db99b-xs6hk` pod with 2 containers in `kubernetes-operator-system` namespace: +![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/7220c1bd-588c-4662-b696-d43b3085eac3) + + +If you position yourself into operator-controller-manager pod with: +`kubectl exec -it -n kubernetes-operator-system kubernetes-operator-controller-manager-768d9db99b-xs6hk bash` and run ls, you should be able to see two items: 
`helm-charts` directory and watches.yaml. + +Go to `config/samples/memgraph_v1_memghraphha.yaml` and provide your license information by setting `MEMGRAPH_ORGANIZATION_NAME` and `MEMGRAPH_ENTERPRISE_LICENSE`. +Start Memgraph HA cluster with `kubectl apply -f config/samples/memgraph_v1_ha.yaml`. + +After approx. 60s, you should be able to see your cluster running with `kubectl get pods -A`: +![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/069e2079-03f2-4827-83c1-b06a338b63e4) + +Find URL of any coordinator instances: +![Screenshot from 2024-07-09 13-10-42](https://github.com/memgraph/kubernetes-operator/assets/53269502/fbc2d487-e258-4613-bc85-0484bcf2e0dd) + +and connect to see the state of the cluster: +![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/c68d52e2-19f7-4e45-8ff0-fc2ee662c64b) + +Let's say I want to change `--log-level` flag for coordinators with id 1 and 2. I can do that in the following way: +![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/87ff43e7-f5b1-4764-9fed-4d87758b0f77) + +Only pods corresponding to these 2 coordinators will get restarted: +![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/930ac553-31ad-4230-9e2e-f67858f3fe25) + +## Clear resources + +``` +kubectl delete -f config/samples/memgraph_v1_ha.yaml +make undeploy +``` From 2130e47fd7c4775ddf029c69d6be7504aa866c2b Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Wed, 10 Jul 2024 11:47:28 +0200 Subject: [PATCH 13/39] Improve docs --- README.md | 10 ++++++++-- docs/installation.md | 43 ++++++++++++++++++++++++++++++------------- 2 files changed, 38 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 6b3fb6c..eb1e63a 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,10 @@ # Memgraph Kubernetes Operator +## Introduction + +Memgraph Kubernetes Operator is WIP. You can currently install the operator and manage the deployment of Memgraph's High Availability cluster +through it. 
+ ## Table of Contents - [Prerequisites](#prerequisites) @@ -8,7 +13,8 @@ ## Prerequisites -We use Go version 1.22.5. Check out here how to [install Go](https://go.dev/doc/install). Helm version is v3.14.4. +We use Go version 1.22.5 (not needed at the moment). Check out here how to [install Go](https://go.dev/doc/install). +Current Helm version is v3.14.4. ## Documentation @@ -18,4 +24,4 @@ Check our [Documentation](/docs) to start using our Kubernetes operator. ## License -Please check the [LICENSE]() file +Please check the [LICENSE](LICENSE) file diff --git a/docs/installation.md b/docs/installation.md index 2c24b20..41b0a0e 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -1,24 +1,47 @@ # Install Memgraph Kubernetes Operator +## Install + +Make sure to clone this repository with its submodule (helm-charts). + +```bash +git clone --recurse-submodules git@github.com:memgraph/kubernetes-operator.git +``` + +After this you can start your operator with a single command: ```bash -git clone git@github.com:memgraph/kubernetes-operator.git -git submodule init -git submodule update make deploy ``` -After following steps above you should be able to see `kubernetes-operator-controller-manager` deployment in `kubernetes-operator-system` namespace: +This command will use operator's image from Memgraph's DockerHub and create all necessary Kubernetes resources for running an operator. 
+ +## Verify installation + +To verify that deployment finished successfully please run: + +```bash +kubectl get deployments -A + +``` + +and you should be able to see `kubernetes-operator-controller-manager` deployment in `kubernetes-operator-system` namespace: ![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/a4fc70fe-ef5b-4541-afd8-3ad3ee43a070) -This also causes creating `kubernetes-operator-controller-manager-768d9db99b-xs6hk` pod with 2 containers in `kubernetes-operator-system` namespace: +Together with the deployments `kubernetes-operator-controller-manager-768d9db99b-xs6hk` pod with 2 containers in +`kubernetes-operator-system` namespace should also get created and you can verify this with: ![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/7220c1bd-588c-4662-b696-d43b3085eac3) If you position yourself into operator-controller-manager pod with: -`kubectl exec -it -n kubernetes-operator-system kubernetes-operator-controller-manager-768d9db99b-xs6hk bash` and run ls, you should be able to see two items: `helm-charts` directory and watches.yaml. +`kubectl exec -it -n kubernetes-operator-system kubernetes-operator-controller-manager-768d9db99b-xs6hk bash` and run ls, +you should be able to see two items: `helm-charts` directory and watches.yaml. + +## Start Memgraph High Availability Cluster + +We already provide sample cluster in `config/samples/memgraph_v1_ha.yaml`. You only need to provide your license information by setting +`MEMGRAPH_ORGANIZATION_NAME` and `MEMGRAPH_ENTERPRISE_LICENSE` environment variables. -Go to `config/samples/memgraph_v1_memghraphha.yaml` and provide your license information by setting `MEMGRAPH_ORGANIZATION_NAME` and `MEMGRAPH_ENTERPRISE_LICENSE`. Start Memgraph HA cluster with `kubectl apply -f config/samples/memgraph_v1_ha.yaml`. After approx. 
60s, you should be able to see your cluster running with `kubectl get pods -A`: @@ -30,12 +53,6 @@ Find URL of any coordinator instances: and connect to see the state of the cluster: ![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/c68d52e2-19f7-4e45-8ff0-fc2ee662c64b) -Let's say I want to change `--log-level` flag for coordinators with id 1 and 2. I can do that in the following way: -![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/87ff43e7-f5b1-4764-9fed-4d87758b0f77) - -Only pods corresponding to these 2 coordinators will get restarted: -![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/930ac553-31ad-4230-9e2e-f67858f3fe25) - ## Clear resources ``` From b8a35220c5a4ab9fdf80b466e7e939c429cd16ac Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Wed, 10 Jul 2024 11:49:41 +0200 Subject: [PATCH 14/39] Improve docs --- README.md | 2 +- docs/installation.md | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index eb1e63a..7320903 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ through it. ## Prerequisites We use Go version 1.22.5 (not needed at the moment). Check out here how to [install Go](https://go.dev/doc/install). -Current Helm version is v3.14.4. +The current Helm version is v3.14.4. 
## Documentation diff --git a/docs/installation.md b/docs/installation.md index 41b0a0e..6effd96 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -18,7 +18,7 @@ This command will use operator's image from Memgraph's DockerHub and create all ## Verify installation -To verify that deployment finished successfully please run: +To verify that deployment ran successfully please run: ```bash kubectl get deployments -A @@ -28,18 +28,18 @@ kubectl get deployments -A and you should be able to see `kubernetes-operator-controller-manager` deployment in `kubernetes-operator-system` namespace: ![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/a4fc70fe-ef5b-4541-afd8-3ad3ee43a070) -Together with the deployments `kubernetes-operator-controller-manager-768d9db99b-xs6hk` pod with 2 containers in +Together with the deployment, `kubernetes-operator-controller-manager-768d9db99b-xs6hk` pod with 2 containers in `kubernetes-operator-system` namespace should also get created and you can verify this with: ![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/7220c1bd-588c-4662-b696-d43b3085eac3) If you position yourself into operator-controller-manager pod with: `kubectl exec -it -n kubernetes-operator-system kubernetes-operator-controller-manager-768d9db99b-xs6hk bash` and run ls, -you should be able to see two items: `helm-charts` directory and watches.yaml. +you should be able to see two items: `helm-charts` directory and `watches.yaml` file. ## Start Memgraph High Availability Cluster -We already provide sample cluster in `config/samples/memgraph_v1_ha.yaml`. You only need to provide your license information by setting +We already provide sample cluster in `config/samples/memgraph_v1_ha.yaml`. You only need to set your license information by setting `MEMGRAPH_ORGANIZATION_NAME` and `MEMGRAPH_ENTERPRISE_LICENSE` environment variables. Start Memgraph HA cluster with `kubectl apply -f config/samples/memgraph_v1_ha.yaml`. 
From 615037e6b7e6f414e644ab92c1fc022b3d48ec34 Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Wed, 10 Jul 2024 13:29:24 +0200 Subject: [PATCH 15/39] Add Kube-linter --- .github/config_files/config_lint.yaml | 8 ++++++ .github/workflows/kubelint.yaml | 41 +++++++++++++++++++++++++++ docs/installation.md | 11 +++++-- 3 files changed, 58 insertions(+), 2 deletions(-) create mode 100644 .github/config_files/config_lint.yaml create mode 100644 .github/workflows/kubelint.yaml diff --git a/.github/config_files/config_lint.yaml b/.github/config_files/config_lint.yaml new file mode 100644 index 0000000..13789a7 --- /dev/null +++ b/.github/config_files/config_lint.yaml @@ -0,0 +1,8 @@ +checks: + addAllBuiltIn: true + exclude: + - "non-existent-service-account" # because the service account is created in another file + - "minimum-three-replicas" # because the deployment contains only 1 replica of the operator + - "no-liveness-probe" # not necessary + - "no-readiness-probe" # no necessary + - "use-namespace" diff --git a/.github/workflows/kubelint.yaml b/.github/workflows/kubelint.yaml new file mode 100644 index 0000000..60983ce --- /dev/null +++ b/.github/workflows/kubelint.yaml @@ -0,0 +1,41 @@ +name: Kubelinter-check + +on: + push: + branches: + - main + paths-ignore: + - docs/** + pull_request: + branches: + - main + workflow_dispatch: {} + +jobs: + Kubelinter-check: + name: Run Kube-linter check + runs-on: ubuntu-latest + steps: + - name: Checkout Code + uses: actions/checkout@v2 + + - name: Scan directory ./config/manager/ with kube-linter + uses: stackrox/kube-linter-action@v1.0.3 + with: + directory: config/manager/manager.yaml + config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml + version: "48442350" + + - name: Scan directory ./config/samples/ with kube-linter + uses: stackrox/kube-linter-action@v1.0.3 + with: + directory: config/samples + config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml + version: "48442350" + + - name: Scan 
directory ./config/crd/ with kube-linter + uses: stackrox/kube-linter-action@v1.0.3 + with: + directory: config/crd + config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml + version: "48442350" diff --git a/docs/installation.md b/docs/installation.md index 6effd96..616eda3 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -1,6 +1,6 @@ # Install Memgraph Kubernetes Operator -## Install +## Option I: Install using Makefile Make sure to clone this repository with its submodule (helm-charts). @@ -16,13 +16,19 @@ make deploy This command will use operator's image from Memgraph's DockerHub and create all necessary Kubernetes resources for running an operator. +## Option II: Install using Helm Charts + +```bash +cd helm-charts +helm install kubernetes-operator charts/kubernetes-operator +``` + ## Verify installation To verify that deployment ran successfully please run: ```bash kubectl get deployments -A - ``` and you should be able to see `kubernetes-operator-controller-manager` deployment in `kubernetes-operator-system` namespace: @@ -53,6 +59,7 @@ Find URL of any coordinator instances: and connect to see the state of the cluster: ![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/c68d52e2-19f7-4e45-8ff0-fc2ee662c64b) + ## Clear resources ``` From dd7dbb87901cae5e51e2b73429286ef51e04a6f0 Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Wed, 10 Jul 2024 13:37:02 +0200 Subject: [PATCH 16/39] Fix kube linter issues --- config/manager/manager.yaml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 038b383..f3e9e08 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -16,6 +16,8 @@ kind: Deployment metadata: name: controller-manager namespace: system + annotations: + email: engineering@memgraph.io labels: control-plane: controller-manager app.kubernetes.io/name: deployment @@ -24,11 +26,16 @@ metadata: 
app.kubernetes.io/created-by: kubernetes-operator app.kubernetes.io/part-of: kubernetes-operator app.kubernetes.io/managed-by: kustomize + owner: Memgraph spec: selector: matchLabels: control-plane: controller-manager replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate template: metadata: annotations: @@ -69,9 +76,10 @@ spec: - args: - --leader-elect - --leader-election-id=kubernetes-operator - image: controller:latest + image: memgraph/kubernetes-operator name: manager securityContext: + readOnlyRootFilesystem: true allowPrivilegeEscalation: false capabilities: drop: From 16b3ad0894f4039fb88b3afa4c9edfa83349f55f Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Wed, 10 Jul 2024 13:50:17 +0200 Subject: [PATCH 17/39] Kube linter improvements --- .github/workflows/kubelint.yaml | 54 ++++++++++++++++++++++++---- config/default/kustomization.yaml | 2 -- config/prometheus/kustomization.yaml | 2 -- config/prometheus/monitor.yaml | 25 ------------- 4 files changed, 48 insertions(+), 35 deletions(-) delete mode 100644 config/prometheus/kustomization.yaml delete mode 100644 config/prometheus/monitor.yaml diff --git a/.github/workflows/kubelint.yaml b/.github/workflows/kubelint.yaml index 60983ce..843d9b2 100644 --- a/.github/workflows/kubelint.yaml +++ b/.github/workflows/kubelint.yaml @@ -11,6 +11,9 @@ on: - main workflow_dispatch: {} +env: + KUBELINTER_VERSION: "143183121" + jobs: Kubelinter-check: name: Run Kube-linter check @@ -19,23 +22,62 @@ jobs: - name: Checkout Code uses: actions/checkout@v2 + - name: Scan directory ./config/crd/ with kube-linter + uses: stackrox/kube-linter-action@v1.0.3 + with: + directory: config/crd + config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml + version: $KUBELINTER_VERSION + + - name: Scan directory ./config/default/ with kube-linter + uses: stackrox/kube-linter-action@v1.0.3 + with: + directory: config/default + config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml + version: 
$KUBELINTER_VERSION + - name: Scan directory ./config/manager/ with kube-linter uses: stackrox/kube-linter-action@v1.0.3 with: - directory: config/manager/manager.yaml + directory: config/manager + config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml + version: $KUBELINTER_VERSION + + - name: Scan directory ./config/manifests/ with kube-linter + uses: stackrox/kube-linter-action@v1.0.3 + with: + directory: config/manifests + config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml + version: $KUBELINTER_VERSION + + - name: Scan directory ./config/rbac/ with kube-linter + uses: stackrox/kube-linter-action@v1.0.3 + with: + directory: config/rbac config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml - version: "48442350" + version: $KUBELINTER_VERSION - name: Scan directory ./config/samples/ with kube-linter uses: stackrox/kube-linter-action@v1.0.3 with: directory: config/samples config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml - version: "48442350" + version: $KUBELINTER_VERSION - - name: Scan directory ./config/crd/ with kube-linter + - name: Scan directory ./config/scoreboard/ with kube-linter uses: stackrox/kube-linter-action@v1.0.3 with: - directory: config/crd + directory: config/scoreboard config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml - version: "48442350" + version: $KUBELINTER_VERSION + + + + + + + + + + + diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index e96fd6a..fe116a4 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -18,8 +18,6 @@ resources: - ../crd - ../rbac - ../manager -# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. -#- ../prometheus patches: # Protect the /metrics endpoint by putting it behind auth. 
diff --git a/config/prometheus/kustomization.yaml b/config/prometheus/kustomization.yaml deleted file mode 100644 index ed13716..0000000 --- a/config/prometheus/kustomization.yaml +++ /dev/null @@ -1,2 +0,0 @@ -resources: -- monitor.yaml diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml deleted file mode 100644 index 7d5f441..0000000 --- a/config/prometheus/monitor.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Prometheus Monitor Service (Metrics) -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - control-plane: controller-manager - app.kubernetes.io/name: servicemonitor - app.kubernetes.io/instance: controller-manager-metrics-monitor - app.kubernetes.io/component: metrics - app.kubernetes.io/created-by: kubernetes-operator - app.kubernetes.io/part-of: kubernetes-operator - app.kubernetes.io/managed-by: kustomize - name: controller-manager-metrics-monitor - namespace: system -spec: - endpoints: - - path: /metrics - port: https - scheme: https - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token - tlsConfig: - insecureSkipVerify: true - selector: - matchLabels: - control-plane: controller-manager From 43ee07dee360e78580faf3a85fef3ecbfbeedb46 Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Wed, 10 Jul 2024 13:59:32 +0200 Subject: [PATCH 18/39] Add default service account --- .github/config_files/config_lint.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/config_files/config_lint.yaml b/.github/config_files/config_lint.yaml index 13789a7..4c3fc1f 100644 --- a/.github/config_files/config_lint.yaml +++ b/.github/config_files/config_lint.yaml @@ -1,7 +1,7 @@ checks: addAllBuiltIn: true exclude: - - "non-existent-service-account" # because the service account is created in another file + - "default-service-account" # because the service account is created in another file - "minimum-three-replicas" # because the deployment contains only 1 replica of the operator - 
"no-liveness-probe" # not necessary - "no-readiness-probe" # no necessary From 268a9e0bcadcfb238118bcc5ee149c9c62c8efc2 Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Wed, 10 Jul 2024 14:04:33 +0200 Subject: [PATCH 19/39] Remove default directory from linting --- .github/workflows/kubelint.yaml | 7 ------- config/default/manager_auth_proxy_patch.yaml | 1 + 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/.github/workflows/kubelint.yaml b/.github/workflows/kubelint.yaml index 843d9b2..fb7a056 100644 --- a/.github/workflows/kubelint.yaml +++ b/.github/workflows/kubelint.yaml @@ -29,13 +29,6 @@ jobs: config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml version: $KUBELINTER_VERSION - - name: Scan directory ./config/default/ with kube-linter - uses: stackrox/kube-linter-action@v1.0.3 - with: - directory: config/default - config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml - version: $KUBELINTER_VERSION - - name: Scan directory ./config/manager/ with kube-linter uses: stackrox/kube-linter-action@v1.0.3 with: diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml index e252f1b..c203f9e 100644 --- a/config/default/manager_auth_proxy_patch.yaml +++ b/config/default/manager_auth_proxy_patch.yaml @@ -11,6 +11,7 @@ spec: containers: - name: kube-rbac-proxy securityContext: + readOnlyRootFilesystem: true allowPrivilegeEscalation: false capabilities: drop: From 0fc15c277bfb292be79771b18bfaaa666846a9eb Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Wed, 10 Jul 2024 14:06:24 +0200 Subject: [PATCH 20/39] Remove manager directory from linting --- .github/workflows/kubelint.yaml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.github/workflows/kubelint.yaml b/.github/workflows/kubelint.yaml index fb7a056..a8285e3 100644 --- a/.github/workflows/kubelint.yaml +++ b/.github/workflows/kubelint.yaml @@ -29,13 +29,6 @@ jobs: config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml 
version: $KUBELINTER_VERSION - - name: Scan directory ./config/manager/ with kube-linter - uses: stackrox/kube-linter-action@v1.0.3 - with: - directory: config/manager - config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml - version: $KUBELINTER_VERSION - - name: Scan directory ./config/manifests/ with kube-linter uses: stackrox/kube-linter-action@v1.0.3 with: From a25e7be58456f47eea27470aa55c1d533708b0cb Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Wed, 10 Jul 2024 14:07:31 +0200 Subject: [PATCH 21/39] Remove rbac directory from linting --- .github/workflows/kubelint.yaml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.github/workflows/kubelint.yaml b/.github/workflows/kubelint.yaml index a8285e3..8537b85 100644 --- a/.github/workflows/kubelint.yaml +++ b/.github/workflows/kubelint.yaml @@ -36,13 +36,6 @@ jobs: config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml version: $KUBELINTER_VERSION - - name: Scan directory ./config/rbac/ with kube-linter - uses: stackrox/kube-linter-action@v1.0.3 - with: - directory: config/rbac - config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml - version: $KUBELINTER_VERSION - - name: Scan directory ./config/samples/ with kube-linter uses: stackrox/kube-linter-action@v1.0.3 with: From 785606014cedd23076cc22d1f4c573996b4da629 Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Wed, 10 Jul 2024 14:08:29 +0200 Subject: [PATCH 22/39] Remove scorecard directory from linting --- .github/workflows/kubelint.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/kubelint.yaml b/.github/workflows/kubelint.yaml index 8537b85..192839f 100644 --- a/.github/workflows/kubelint.yaml +++ b/.github/workflows/kubelint.yaml @@ -43,10 +43,10 @@ jobs: config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml version: $KUBELINTER_VERSION - - name: Scan directory ./config/scoreboard/ with kube-linter + - name: Scan directory ./config/scorecard/ with kube-linter 
uses: stackrox/kube-linter-action@v1.0.3 with: - directory: config/scoreboard + directory: config/scorecard config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml version: $KUBELINTER_VERSION From 8408cdb35cc68494e89d43d217102577ac58d79a Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Wed, 10 Jul 2024 14:22:04 +0200 Subject: [PATCH 23/39] Fix manager img --- config/manager/manager.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index f3e9e08..f4e15ef 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -76,7 +76,7 @@ spec: - args: - --leader-elect - --leader-election-id=kubernetes-operator - image: memgraph/kubernetes-operator + image: memgraph/kubernetes-operator:0.0.2 name: manager securityContext: readOnlyRootFilesystem: true From 16ca7994a600906ae1bf65f77ba529d7296214ef Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Thu, 11 Jul 2024 08:53:58 +0200 Subject: [PATCH 24/39] Clean default directory --- .github/config_files/config_lint.yaml | 2 + .github/workflows/kubelint.yaml | 7 ++++ config/default/kustomization.yaml | 17 -------- config/default/manager_auth_proxy_patch.yaml | 41 -------------------- config/default/manager_config_patch.yaml | 10 ----- 5 files changed, 9 insertions(+), 68 deletions(-) delete mode 100644 config/default/manager_auth_proxy_patch.yaml delete mode 100644 config/default/manager_config_patch.yaml diff --git a/.github/config_files/config_lint.yaml b/.github/config_files/config_lint.yaml index 4c3fc1f..64e662e 100644 --- a/.github/config_files/config_lint.yaml +++ b/.github/config_files/config_lint.yaml @@ -6,3 +6,5 @@ checks: - "no-liveness-probe" # not necessary - "no-readiness-probe" # no necessary - "use-namespace" + - "dnsconfig-options" + - "no-node-affinity" diff --git a/.github/workflows/kubelint.yaml b/.github/workflows/kubelint.yaml index 192839f..76208ea 100644 --- a/.github/workflows/kubelint.yaml +++ 
b/.github/workflows/kubelint.yaml @@ -29,6 +29,13 @@ jobs: config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml version: $KUBELINTER_VERSION + - name: Scan directory ./config/default/ with kube-linter + uses: stackrox/kube-linter-action@v1.0.3 + with: + directory: config/default + config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml + version: $KUBELINTER_VERSION + - name: Scan directory ./config/manifests/ with kube-linter uses: stackrox/kube-linter-action@v1.0.3 with: diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index fe116a4..8c3a385 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -1,26 +1,9 @@ # Adds namespace to all resources. namespace: kubernetes-operator-system -# Value of this field is prepended to the -# names of all resources, e.g. a deployment named -# "wordpress" becomes "alices-wordpress". -# Note that it should also match with the prefix (text before '-') of the namespace -# field above. namePrefix: kubernetes-operator- -# Labels to add to all resources and selectors. -#labels: -#- includeSelectors: true -# pairs: -# someName: someValue - resources: - ../crd - ../rbac - ../manager - -patches: -# Protect the /metrics endpoint by putting it behind auth. -# If you want your controller-manager to expose the /metrics -# endpoint w/o any authn/z, please comment the following line. -- path: manager_auth_proxy_patch.yaml diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml deleted file mode 100644 index c203f9e..0000000 --- a/config/default/manager_auth_proxy_patch.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# This patch inject a sidecar container which is a HTTP proxy for the -# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 
-apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: kube-rbac-proxy - securityContext: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - drop: - - "ALL" - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.15.0 - args: - - "--secure-listen-address=0.0.0.0:8443" - - "--upstream=http://127.0.0.1:8080/" - - "--logtostderr=true" - - "--v=0" - ports: - - containerPort: 8443 - protocol: TCP - name: https - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi - - name: manager - args: - - "--health-probe-bind-address=:8081" - - "--metrics-bind-address=127.0.0.1:8080" - - "--leader-elect" - - "--leader-election-id=kubernetes-operator" diff --git a/config/default/manager_config_patch.yaml b/config/default/manager_config_patch.yaml deleted file mode 100644 index f6f5891..0000000 --- a/config/default/manager_config_patch.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: manager From 8c0f49fda1d441daf5c455c31954aebeb6113b5f Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Thu, 11 Jul 2024 09:00:41 +0200 Subject: [PATCH 25/39] Add manager to kubelinter, rename resources --- .github/workflows/kubelint.yaml | 7 +++++++ config/default/kustomization.yaml | 4 ++-- config/manager/kustomization.yaml | 3 +++ 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/.github/workflows/kubelint.yaml b/.github/workflows/kubelint.yaml index 76208ea..ee6d3d7 100644 --- a/.github/workflows/kubelint.yaml +++ b/.github/workflows/kubelint.yaml @@ -36,6 +36,13 @@ jobs: config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml version: $KUBELINTER_VERSION + - name: Scan directory ./config/manager/ with kube-linter + uses: stackrox/kube-linter-action@v1.0.3 + with: + directory: config/manager + 
config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml + version: $KUBELINTER_VERSION + - name: Scan directory ./config/manifests/ with kube-linter uses: stackrox/kube-linter-action@v1.0.3 with: diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 8c3a385..1ef3cdf 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -1,7 +1,7 @@ # Adds namespace to all resources. -namespace: kubernetes-operator-system +namespace: memgraph-operator-system -namePrefix: kubernetes-operator- +namePrefix: memgraph-k8- resources: - ../crd diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index c3e4459..9a846ad 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -6,3 +6,6 @@ images: - name: controller newName: memgraph/kubernetes-operator newTag: 0.0.2 +- name: memgraph-k8-operator + newName: memgraph/kubernetes-operator + newTag: 0.0.2 From de9d9fa90c7034ee032e7991bb4fa789bee49a8b Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Thu, 11 Jul 2024 11:05:11 +0200 Subject: [PATCH 26/39] Remove auth roles, improve linting --- .github/config_files/config_lint.yaml | 3 +- Makefile | 4 +-- config/manager/kustomization.yaml | 5 +-- .../rbac/auth_proxy_client_clusterrole.yaml | 16 ---------- config/rbac/auth_proxy_role.yaml | 24 -------------- config/rbac/auth_proxy_role_binding.yaml | 19 ------------ config/rbac/auth_proxy_service.yaml | 21 ------------- config/rbac/kustomization.yaml | 12 ------- config/rbac/memgraphha_editor_role.yaml | 31 ------------------- config/rbac/memgraphha_viewer_role.yaml | 27 ---------------- config/samples/kustomization.yaml | 2 -- config/samples/memgraph_v1_ha.yaml | 4 +-- 12 files changed, 7 insertions(+), 161 deletions(-) delete mode 100644 config/rbac/auth_proxy_client_clusterrole.yaml delete mode 100644 config/rbac/auth_proxy_role.yaml delete mode 100644 config/rbac/auth_proxy_role_binding.yaml delete mode 
100644 config/rbac/auth_proxy_service.yaml delete mode 100644 config/rbac/memgraphha_editor_role.yaml delete mode 100644 config/rbac/memgraphha_viewer_role.yaml diff --git a/.github/config_files/config_lint.yaml b/.github/config_files/config_lint.yaml index 64e662e..ac873af 100644 --- a/.github/config_files/config_lint.yaml +++ b/.github/config_files/config_lint.yaml @@ -1,10 +1,11 @@ checks: addAllBuiltIn: true exclude: - - "default-service-account" # because the service account is created in another file + - "non-existent-service-account" # because the service account is created in another file - "minimum-three-replicas" # because the deployment contains only 1 replica of the operator - "no-liveness-probe" # not necessary - "no-readiness-probe" # no necessary - "use-namespace" - "dnsconfig-options" - "no-node-affinity" + - "non-isolated-pod" diff --git a/Makefile b/Makefile index 83ed94a..3cf5068 100644 --- a/Makefile +++ b/Makefile @@ -113,7 +113,7 @@ uninstall: kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube .PHONY: deploy deploy: kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. - cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + cd config/manager && $(KUSTOMIZE) edit set image memgraph-kubernetes-operator=${IMG} $(KUSTOMIZE) build config/default | kubectl apply -f - .PHONY: undeploy @@ -174,7 +174,7 @@ endif .PHONY: bundle bundle: kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files. 
$(OPERATOR_SDK) generate kustomize manifests -q - cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) + cd config/manager && $(KUSTOMIZE) edit set image memgraph-kubernetes-operator=$(IMG) $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS) $(OPERATOR_SDK) bundle validate ./bundle diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 9a846ad..1fe9778 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -3,9 +3,6 @@ resources: apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: -- name: controller - newName: memgraph/kubernetes-operator - newTag: 0.0.2 -- name: memgraph-k8-operator +- name: memgraph-kubernetes-operator newName: memgraph/kubernetes-operator newTag: 0.0.2 diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/auth_proxy_client_clusterrole.yaml deleted file mode 100644 index 6bfe0a9..0000000 --- a/config/rbac/auth_proxy_client_clusterrole.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: metrics-reader - app.kubernetes.io/component: kube-rbac-proxy - app.kubernetes.io/created-by: kubernetes-operator - app.kubernetes.io/part-of: kubernetes-operator - app.kubernetes.io/managed-by: kustomize - name: metrics-reader -rules: -- nonResourceURLs: - - "/metrics" - verbs: - - get diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml deleted file mode 100644 index 2ff84ee..0000000 --- a/config/rbac/auth_proxy_role.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: proxy-role - app.kubernetes.io/component: kube-rbac-proxy - app.kubernetes.io/created-by: kubernetes-operator - app.kubernetes.io/part-of: 
kubernetes-operator - app.kubernetes.io/managed-by: kustomize - name: proxy-role -rules: -- apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create -- apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml deleted file mode 100644 index b7f3ab0..0000000 --- a/config/rbac/auth_proxy_role_binding.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/name: clusterrolebinding - app.kubernetes.io/instance: proxy-rolebinding - app.kubernetes.io/component: kube-rbac-proxy - app.kubernetes.io/created-by: kubernetes-operator - app.kubernetes.io/part-of: kubernetes-operator - app.kubernetes.io/managed-by: kustomize - name: proxy-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: proxy-role -subjects: -- kind: ServiceAccount - name: controller-manager - namespace: system diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml deleted file mode 100644 index cdd7723..0000000 --- a/config/rbac/auth_proxy_service.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - control-plane: controller-manager - app.kubernetes.io/name: service - app.kubernetes.io/instance: controller-manager-metrics-service - app.kubernetes.io/component: kube-rbac-proxy - app.kubernetes.io/created-by: kubernetes-operator - app.kubernetes.io/part-of: kubernetes-operator - app.kubernetes.io/managed-by: kustomize - name: controller-manager-metrics-service - namespace: system -spec: - ports: - - name: https - port: 8443 - protocol: TCP - targetPort: https - selector: - control-plane: controller-manager diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index 731832a..9221586 100644 --- a/config/rbac/kustomization.yaml +++ 
b/config/rbac/kustomization.yaml @@ -1,18 +1,6 @@ resources: -# All RBAC will be applied under this service account in -# the deployment namespace. You may comment out this resource -# if your manager will use a service account that exists at -# runtime. Be sure to update RoleBinding and ClusterRoleBinding -# subjects if changing service account names. - service_account.yaml - role.yaml - role_binding.yaml - leader_election_role.yaml - leader_election_role_binding.yaml -# Comment the following 4 lines if you want to disable -# the auth proxy (https://github.com/brancz/kube-rbac-proxy) -# which protects your /metrics endpoint. -- auth_proxy_service.yaml -- auth_proxy_role.yaml -- auth_proxy_role_binding.yaml -- auth_proxy_client_clusterrole.yaml diff --git a/config/rbac/memgraphha_editor_role.yaml b/config/rbac/memgraphha_editor_role.yaml deleted file mode 100644 index e54bf20..0000000 --- a/config/rbac/memgraphha_editor_role.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# permissions for end users to edit memgraphhas. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: memgraphha-editor-role - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: kubernetes-operator - app.kubernetes.io/part-of: kubernetes-operator - app.kubernetes.io/managed-by: kustomize - name: memgraphha-editor-role -rules: -- apiGroups: - - memgraph.com - resources: - - memgraphhas - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - memgraph.com - resources: - - memgraphhas/status - verbs: - - get diff --git a/config/rbac/memgraphha_viewer_role.yaml b/config/rbac/memgraphha_viewer_role.yaml deleted file mode 100644 index 769adfd..0000000 --- a/config/rbac/memgraphha_viewer_role.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# permissions for end users to view memgraphhas. 
-apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: memgraphha-viewer-role - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: kubernetes-operator - app.kubernetes.io/part-of: kubernetes-operator - app.kubernetes.io/managed-by: kustomize - name: memgraphha-viewer-role -rules: -- apiGroups: - - memgraph.com - resources: - - memgraphhas - verbs: - - get - - list - - watch -- apiGroups: - - memgraph.com - resources: - - memgraphhas/status - verbs: - - get diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index a483d76..e0823f5 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -1,4 +1,2 @@ -## Append samples of your project ## resources: - memgraph_v1_ha.yaml -#+kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/memgraph_v1_ha.yaml b/config/samples/memgraph_v1_ha.yaml index 802b8ec..eae6e25 100644 --- a/config/samples/memgraph_v1_ha.yaml +++ b/config/samples/memgraph_v1_ha.yaml @@ -67,7 +67,7 @@ spec: logPVC: false logPVCClassName: "" logPVCSize: 256Mi - storagePVC: true + storagePVC: false storagePVCClassName: "" storagePVCSize: 1Gi data: @@ -75,7 +75,7 @@ spec: logPVC: false logPVCClassName: "" logPVCSize: 256Mi - storagePVC: true + storagePVC: false storagePVCClassName: "" storagePVCSize: 1Gi env: From c1f890c16d2b451aab42ed89800c0d368b34a51e Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Thu, 11 Jul 2024 11:16:16 +0200 Subject: [PATCH 27/39] Remove probes and name prefix --- config/default/kustomization.yaml | 3 +-- config/manager/manager.yaml | 41 ------------------------------- 2 files changed, 1 insertion(+), 43 deletions(-) diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 1ef3cdf..da8437b 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -1,7 +1,6 @@ -# Adds namespace to 
all resources. namespace: memgraph-operator-system -namePrefix: memgraph-k8- +namePrefix: "" resources: - ../crd diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index f4e15ef..ab6e39c 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -43,35 +43,8 @@ spec: labels: control-plane: controller-manager spec: - # TODO(user): Uncomment the following code to configure the nodeAffinity expression - # according to the platforms which are supported by your solution. - # It is considered best practice to support multiple architectures. You can - # build your manager image using the makefile target docker-buildx. - # affinity: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: kubernetes.io/arch - # operator: In - # values: - # - amd64 - # - arm64 - # - ppc64le - # - s390x - # - key: kubernetes.io/os - # operator: In - # values: - # - linux securityContext: runAsNonRoot: true - # TODO(user): For common cases that do not require escalating privileges - # it is recommended to ensure that all your Pods/Containers are restrictive. - # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted - # Please uncomment the following code if your project does NOT have to work on old Kubernetes - # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ). - # seccompProfile: - # type: RuntimeDefault containers: - args: - --leader-elect @@ -84,20 +57,6 @@ spec: capabilities: drop: - "ALL" - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 15 - periodSeconds: 20 - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 5 - periodSeconds: 10 - # TODO(user): Configure the resources accordingly based on the project requirements. 
- # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ resources: limits: cpu: 500m From d0bc75e26fbfa4ed4de12bcd5a2a727f4c60fcf4 Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Thu, 11 Jul 2024 11:20:43 +0200 Subject: [PATCH 28/39] Remove labels from manager --- config/manager/manager.yaml | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index ab6e39c..e12fd3c 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -14,24 +14,17 @@ metadata: apiVersion: apps/v1 kind: Deployment metadata: - name: controller-manager namespace: system annotations: email: engineering@memgraph.io labels: - control-plane: controller-manager - app.kubernetes.io/name: deployment - app.kubernetes.io/instance: controller-manager - app.kubernetes.io/component: manager - app.kubernetes.io/created-by: kubernetes-operator - app.kubernetes.io/part-of: kubernetes-operator - app.kubernetes.io/managed-by: kustomize owner: Memgraph + name: controller-manager spec: + replicas: 1 selector: matchLabels: - control-plane: controller-manager - replicas: 1 + name: controller-manager strategy: rollingUpdate: maxUnavailable: 1 @@ -41,7 +34,7 @@ spec: annotations: kubectl.kubernetes.io/default-container: manager labels: - control-plane: controller-manager + name: controller-manager spec: securityContext: runAsNonRoot: true From 8aeb7aa9ecb502bab56038d7ceabd49bfa74a6d6 Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Thu, 11 Jul 2024 11:22:38 +0200 Subject: [PATCH 29/39] Renamed Deployment --- config/manager/manager.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index e12fd3c..8f7ee81 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -19,12 +19,12 @@ metadata: email: engineering@memgraph.io labels: owner: Memgraph - name: controller-manager + 
name: memgraph-kubernetes-operator spec: replicas: 1 selector: matchLabels: - name: controller-manager + name: memgraph-kubernetes-operator strategy: rollingUpdate: maxUnavailable: 1 @@ -34,7 +34,7 @@ spec: annotations: kubectl.kubernetes.io/default-container: manager labels: - name: controller-manager + name: memgraph-kubernetes-operator spec: securityContext: runAsNonRoot: true From cc2776708678d88f82f615099af3de06f727f4ed Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Thu, 11 Jul 2024 11:23:56 +0200 Subject: [PATCH 30/39] Removed labels from service account and role binding --- config/rbac/role_binding.yaml | 7 ------- config/rbac/service_account.yaml | 7 ------- 2 files changed, 14 deletions(-) diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml index 9eecc5e..2070ede 100644 --- a/config/rbac/role_binding.yaml +++ b/config/rbac/role_binding.yaml @@ -1,13 +1,6 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - labels: - app.kubernetes.io/name: clusterrolebinding - app.kubernetes.io/instance: manager-rolebinding - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: kubernetes-operator - app.kubernetes.io/part-of: kubernetes-operator - app.kubernetes.io/managed-by: kustomize name: manager-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/config/rbac/service_account.yaml b/config/rbac/service_account.yaml index 06eefef..7cd6025 100644 --- a/config/rbac/service_account.yaml +++ b/config/rbac/service_account.yaml @@ -1,12 +1,5 @@ apiVersion: v1 kind: ServiceAccount metadata: - labels: - app.kubernetes.io/name: serviceaccount - app.kubernetes.io/instance: controller-manager-sa - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: kubernetes-operator - app.kubernetes.io/part-of: kubernetes-operator - app.kubernetes.io/managed-by: kustomize name: controller-manager namespace: system From 8f9c0e67fffb50ab8e2b668750516b92f22bf8d3 Mon Sep 17 00:00:00 2001 From: Andi 
Skrgat Date: Thu, 11 Jul 2024 11:24:53 +0200 Subject: [PATCH 31/39] Removed labels from namespace --- config/manager/manager.yaml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 8f7ee81..260f115 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -1,14 +1,6 @@ apiVersion: v1 kind: Namespace metadata: - labels: - control-plane: controller-manager - app.kubernetes.io/name: namespace - app.kubernetes.io/instance: system - app.kubernetes.io/component: manager - app.kubernetes.io/created-by: kubernetes-operator - app.kubernetes.io/part-of: kubernetes-operator - app.kubernetes.io/managed-by: kustomize name: system --- apiVersion: apps/v1 From 4d46382c2331a04cec565a72733c8225efe1c444 Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Thu, 11 Jul 2024 11:26:31 +0200 Subject: [PATCH 32/39] Renamed role --- config/rbac/role.yaml | 2 +- config/rbac/role_binding.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 94d9bfa..b3073c9 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -1,7 +1,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: manager-role + name: memgraph-kubernetes-operator rules: ## ## Base operator rules diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml index 2070ede..2263e2b 100644 --- a/config/rbac/role_binding.yaml +++ b/config/rbac/role_binding.yaml @@ -1,11 +1,11 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: manager-rolebinding + name: memgraph-kubernetes-operator roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: manager-role + name: memgraph-kubernetes-operator subjects: - kind: ServiceAccount name: controller-manager From 983b854260c1d283e40cab4055db5806bfeb2852 Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Thu, 11 Jul 2024 11:28:40 +0200 Subject: 
[PATCH 33/39] Rename service account --- config/manager/manager.yaml | 2 +- config/rbac/leader_election_role_binding.yaml | 2 +- config/rbac/role_binding.yaml | 2 +- config/rbac/service_account.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 260f115..23039b3 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -49,5 +49,5 @@ spec: requests: cpu: 10m memory: 64Mi - serviceAccountName: controller-manager + serviceAccountName: memgraph-kubernetes-operator terminationGracePeriodSeconds: 10 diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml index 55647f2..1afd343 100644 --- a/config/rbac/leader_election_role_binding.yaml +++ b/config/rbac/leader_election_role_binding.yaml @@ -15,5 +15,5 @@ roleRef: name: leader-election-role subjects: - kind: ServiceAccount - name: controller-manager + name: memgraph-kubernetes-operator namespace: system diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml index 2263e2b..1c76d6c 100644 --- a/config/rbac/role_binding.yaml +++ b/config/rbac/role_binding.yaml @@ -8,5 +8,5 @@ roleRef: name: memgraph-kubernetes-operator subjects: - kind: ServiceAccount - name: controller-manager + name: memgraph-kubernetes-operator namespace: system diff --git a/config/rbac/service_account.yaml b/config/rbac/service_account.yaml index 7cd6025..3d00c19 100644 --- a/config/rbac/service_account.yaml +++ b/config/rbac/service_account.yaml @@ -1,5 +1,5 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: controller-manager + name: memgraph-kubernetes-operator namespace: system From d832da86b6ed0cc60b9bea5f8ab5d2d5ffe50167 Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Thu, 11 Jul 2024 11:35:48 +0200 Subject: [PATCH 34/39] Remove annotation from manager --- config/manager/manager.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/config/manager/manager.yaml 
b/config/manager/manager.yaml index 23039b3..7ce75c0 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -23,8 +23,6 @@ spec: type: RollingUpdate template: metadata: - annotations: - kubectl.kubernetes.io/default-container: manager labels: name: memgraph-kubernetes-operator spec: From 32363dba586289d41821b0759a04cad48106ddb1 Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Thu, 11 Jul 2024 11:38:11 +0200 Subject: [PATCH 35/39] Unify namespaces --- config/manager/manager.yaml | 4 ++-- config/rbac/leader_election_role_binding.yaml | 2 +- config/rbac/role_binding.yaml | 2 +- config/rbac/service_account.yaml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 7ce75c0..f5395ae 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -1,12 +1,12 @@ apiVersion: v1 kind: Namespace metadata: - name: system + name: memgraph-operator-system --- apiVersion: apps/v1 kind: Deployment metadata: - namespace: system + namespace: memgraph-operator-system annotations: email: engineering@memgraph.io labels: diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml index 1afd343..f08061e 100644 --- a/config/rbac/leader_election_role_binding.yaml +++ b/config/rbac/leader_election_role_binding.yaml @@ -16,4 +16,4 @@ roleRef: subjects: - kind: ServiceAccount name: memgraph-kubernetes-operator - namespace: system + namespace: memgraph-operator-system diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml index 1c76d6c..9fded4b 100644 --- a/config/rbac/role_binding.yaml +++ b/config/rbac/role_binding.yaml @@ -9,4 +9,4 @@ roleRef: subjects: - kind: ServiceAccount name: memgraph-kubernetes-operator - namespace: system + namespace: memgraph-operator-system diff --git a/config/rbac/service_account.yaml b/config/rbac/service_account.yaml index 3d00c19..f81938c 100644 --- 
a/config/rbac/service_account.yaml +++ b/config/rbac/service_account.yaml @@ -2,4 +2,4 @@ apiVersion: v1 kind: ServiceAccount metadata: name: memgraph-kubernetes-operator - namespace: system + namespace: memgraph-operator-system From 42894c7e9cd99abddf32ebc50444b127c9591196 Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Thu, 11 Jul 2024 12:05:33 +0200 Subject: [PATCH 36/39] Remove leader election roles --- config/manager/manager.yaml | 2 - config/rbac/kustomization.yaml | 2 - config/rbac/leader_election_role.yaml | 44 ------------------- config/rbac/leader_election_role_binding.yaml | 19 -------- 4 files changed, 67 deletions(-) delete mode 100644 config/rbac/leader_election_role.yaml delete mode 100644 config/rbac/leader_election_role_binding.yaml diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index f5395ae..49c81a8 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -30,8 +30,6 @@ spec: runAsNonRoot: true containers: - args: - - --leader-elect - - --leader-election-id=kubernetes-operator image: memgraph/kubernetes-operator:0.0.2 name: manager securityContext: diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index 9221586..664fcac 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -2,5 +2,3 @@ resources: - service_account.yaml - role.yaml - role_binding.yaml -- leader_election_role.yaml -- leader_election_role_binding.yaml diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml deleted file mode 100644 index ddf76ba..0000000 --- a/config/rbac/leader_election_role.yaml +++ /dev/null @@ -1,44 +0,0 @@ -# permissions to do leader election. 
-apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/name: role - app.kubernetes.io/instance: leader-election-role - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: kubernetes-operator - app.kubernetes.io/part-of: kubernetes-operator - app.kubernetes.io/managed-by: kustomize - name: leader-election-role -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml deleted file mode 100644 index f08061e..0000000 --- a/config/rbac/leader_election_role_binding.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/name: rolebinding - app.kubernetes.io/instance: leader-election-rolebinding - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: kubernetes-operator - app.kubernetes.io/part-of: kubernetes-operator - app.kubernetes.io/managed-by: kustomize - name: leader-election-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: leader-election-role -subjects: -- kind: ServiceAccount - name: memgraph-kubernetes-operator - namespace: memgraph-operator-system From 52b66a7ff2ed28d521285a2ffcc9aa206b83643c Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Thu, 11 Jul 2024 12:18:38 +0200 Subject: [PATCH 37/39] Improve docs --- docs/installation.md | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/docs/installation.md b/docs/installation.md index 616eda3..54afc16 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -16,37 +16,36 @@ make deploy 
This command will use operator's image from Memgraph's DockerHub and create all necessary Kubernetes resources for running an operator. -## Option II: Install using Helm Charts +## Option II: Install using kubectl ```bash -cd helm-charts -helm install kubernetes-operator charts/kubernetes-operator +kubectl apply -k config/default ``` ## Verify installation -To verify that deployment ran successfully please run: +Installation using any of the options described above will create Kubernetes ServiceAccount, RoleBinding, Role, Deployment and Pods all in the newly created +namespace `memgraph-operator-system`. You can check your resources with: ```bash +kubectl get serviceaccounts -A +kubectl get rolebindings -A +kubectl get roles -A kubectl get deployments -A +kubectl get pods -A ``` -and you should be able to see `kubernetes-operator-controller-manager` deployment in `kubernetes-operator-system` namespace: -![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/a4fc70fe-ef5b-4541-afd8-3ad3ee43a070) +CustomResourceDefinition `memgraphhas.memgraph.com`, whose job is to monitor CustomResource `MemgraphHA`, will also get created and you can verify +this with: -Together with the deployment, `kubernetes-operator-controller-manager-768d9db99b-xs6hk` pod with 2 containers in -`kubernetes-operator-system` namespace should also get created and you can verify this with: -![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/7220c1bd-588c-4662-b696-d43b3085eac3) - - -If you position yourself into operator-controller-manager pod with: -`kubectl exec -it -n kubernetes-operator-system kubernetes-operator-controller-manager-768d9db99b-xs6hk bash` and run ls, -you should be able to see two items: `helm-charts` directory and `watches.yaml` file. ## Start Memgraph High Availability Cluster We already provide sample cluster in `config/samples/memgraph_v1_ha.yaml`.
You only need to set your license information by setting -`MEMGRAPH_ORGANIZATION_NAME` and `MEMGRAPH_ENTERPRISE_LICENSE` environment variables. +`MEMGRAPH_ORGANIZATION_NAME` and `MEMGRAPH_ENTERPRISE_LICENSE` environment variables in the sample file. Start Memgraph HA cluster with `kubectl apply -f config/samples/memgraph_v1_ha.yaml`. @@ -62,7 +61,7 @@ and connect to see the state of the cluster: ## Clear resources -``` +```bash kubectl delete -f config/samples/memgraph_v1_ha.yaml -make undeploy +make undeploy / or kubectl delete -k config/default ``` From e7e627fcb74a18c9c5851452a72f9d1281aae595 Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Thu, 11 Jul 2024 12:21:47 +0200 Subject: [PATCH 38/39] Improve docs --- docs/installation.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/docs/installation.md b/docs/installation.md index 54afc16..99bcb42 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -49,13 +49,11 @@ We already provide sample cluster in `config/samples/memgraph_v1_ha.yaml`. You o Start Memgraph HA cluster with `kubectl apply -f config/samples/memgraph_v1_ha.yaml`. -After approx. 60s, you should be able to see your cluster running with `kubectl get pods -A`: -![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/069e2079-03f2-4827-83c1-b06a338b63e4) +After approx. 
60s, you should be able to see instances in the output of `kubectl get pods -A`: -![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/069e2079-03f2-4827-83c1-b06a338b63e4) -Find URL of any coordinator instances: -![Screenshot from 2024-07-09 13-10-42](https://github.com/memgraph/kubernetes-operator/assets/53269502/fbc2d487-e258-4613-bc85-0484bcf2e0dd) -and connect to see the state of the cluster: +You can now find URL of any coordinator instances by running e.g. `minikube service list` and connect to see the state of the cluster by running +`show instances;`: ![image](https://github.com/memgraph/kubernetes-operator/assets/53269502/c68d52e2-19f7-4e45-8ff0-fc2ee662c64b) From b1e45215d2d8a354d9f1c8c2833f244fd33f4de2 Mon Sep 17 00:00:00 2001 From: Andi Skrgat Date: Thu, 11 Jul 2024 14:38:31 +0200 Subject: [PATCH 39/39] Improve docs --- Makefile | 2 +- docs/installation.md | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3cf5068..88164d4 100644 --- a/Makefile +++ b/Makefile @@ -28,7 +28,7 @@ BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) # This variable is used to construct full image tags for bundle and catalog images. # # For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both -# my.domain/kubernetes-operator-bundle:$VERSION and my.domain/kubernetes-operator-catalog:$VERSION. +# my.domain/kubernetes-operator-bundle:$VERSION and my.domain/kubernetes-operator-catalog:$VERSION. IMAGE_TAG_BASE ?= memgraph/kubernetes-operator # BUNDLE_IMG defines the image:tag used for the bundle. diff --git a/docs/installation.md b/docs/installation.md index 99bcb42..100bccc 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -1,5 +1,7 @@ # Install Memgraph Kubernetes Operator +All described installation options will run the Operator inside the cluster. + ## Option I: Install using Makefile Make sure to clone this repository with its submodule (helm-charts).