From d952ce1b0a48ccb37084f29e8a57b04049c521a7 Mon Sep 17 00:00:00 2001 From: Simon Ostendorf Date: Fri, 25 Aug 2023 09:40:58 +0200 Subject: [PATCH] Add k8s support (#377) * feat(charts): add helm chart * feat(ansible): remove due to k8s support * fix(charts): set debug to false * feat(charts): add readme * feat(deploy): add k8s deploy guide * fix(deploy): fmt * feat(ci): add helm chart release action * feat(deploy): improve k8s guide * feat(deploy): improve guide * feat(deploy): improve guide * feat(deploy): improve guide --- .dockerignore | 2 + .github/workflows/release-chart.yml | 29 ++ README.md | 8 + charts/portals/Chart.yaml | 6 + charts/portals/README.md | 44 ++ charts/portals/templates/_helpers.tpl | 51 +++ charts/portals/templates/deployment.yaml | 53 +++ charts/portals/templates/ingress.yaml | 54 +++ charts/portals/templates/secret.yaml | 10 + charts/portals/templates/service.yaml | 14 + charts/portals/values.yaml | 61 +++ deploy/README.md | 415 ++++++++++++++++++ deploy/cluster/cluster.yaml | 25 ++ deploy/cluster/hetznercluster.yaml | 37 ++ deploy/cluster/kubeadm-cfg-tmpl.yaml | 119 +++++ deploy/cluster/kubeadm-ctl.yaml | 216 +++++++++ deploy/cluster/machine-tmpl.yaml | 29 ++ deploy/cluster/md.yaml | 74 ++++ deploy/cluster/secret.yaml | 9 + deploy/deployments/addons/ccm-secret.yaml | 8 + deploy/deployments/addons/ccm-values.yaml | 24 + .../addons/cert-manager-issuer.yaml | 26 ++ .../addons/cert-manager-values.yaml | 1 + deploy/deployments/addons/cilium-values.yaml | 43 ++ deploy/deployments/addons/csi-2.7.0.yaml | 393 +++++++++++++++++ deploy/deployments/addons/csi-secret.yaml | 7 + .../addons/ingress-nginx-values.yaml | 10 + deploy/deployments/portals/pgsql.yaml | 10 + .../deployments/portals/portals-values.yaml | 14 + .../deployments/portals/redis-pw-secret.yaml | 7 + deploy/deployments/portals/redis-values.yaml | 5 + deploy/nat-gateway/cloud-config.yaml | 24 + scaleup/.env.example | 13 - scaleup/README.md | 19 - scaleup/hosts.ini | 2 - scaleup/playbooks/update_hosts.yml | 45 -- scaleup/portals_scaleup.py | 74 ---- 37 files changed, 1828 insertions(+), 153 deletions(-) create mode 100644 .github/workflows/release-chart.yml create mode 100644 charts/portals/Chart.yaml create mode 100644 charts/portals/README.md create mode 100644 charts/portals/templates/_helpers.tpl create mode 100644 charts/portals/templates/deployment.yaml create mode 100644 charts/portals/templates/ingress.yaml create mode 100644 charts/portals/templates/secret.yaml create mode 100644 charts/portals/templates/service.yaml create mode 100644 charts/portals/values.yaml create mode 100644 deploy/README.md create mode 100644 deploy/cluster/cluster.yaml create mode 100644 deploy/cluster/hetznercluster.yaml create mode 100644 deploy/cluster/kubeadm-cfg-tmpl.yaml create mode 100644 deploy/cluster/kubeadm-ctl.yaml create mode 100644 deploy/cluster/machine-tmpl.yaml create mode 100644 deploy/cluster/md.yaml create mode 100644 deploy/cluster/secret.yaml create mode 100644 deploy/deployments/addons/ccm-secret.yaml create mode 100644 deploy/deployments/addons/ccm-values.yaml create mode 100644 deploy/deployments/addons/cert-manager-issuer.yaml create mode 100644 deploy/deployments/addons/cert-manager-values.yaml create mode 100644 deploy/deployments/addons/cilium-values.yaml create mode 100644 deploy/deployments/addons/csi-2.7.0.yaml create mode 100644 deploy/deployments/addons/csi-secret.yaml create mode 100644 deploy/deployments/addons/ingress-nginx-values.yaml create mode 100644 deploy/deployments/portals/pgsql.yaml 
create mode 100644 deploy/deployments/portals/portals-values.yaml create mode 100644 deploy/deployments/portals/redis-pw-secret.yaml create mode 100644 deploy/deployments/portals/redis-values.yaml create mode 100644 deploy/nat-gateway/cloud-config.yaml delete mode 100644 scaleup/.env.example delete mode 100644 scaleup/README.md delete mode 100644 scaleup/hosts.ini delete mode 100644 scaleup/playbooks/update_hosts.yml delete mode 100644 scaleup/portals_scaleup.py diff --git a/.dockerignore b/.dockerignore index 998e5c3e..47a84115 100644 --- a/.dockerignore +++ b/.dockerignore @@ -16,6 +16,8 @@ README.md LICENSE rr database/seeders/tutors.csv +charts/ +deploy/ scaleup/ storage/ docs/ diff --git a/.github/workflows/release-chart.yml b/.github/workflows/release-chart.yml new file mode 100644 index 00000000..99767c9f --- /dev/null +++ b/.github/workflows/release-chart.yml @@ -0,0 +1,29 @@ +name: Release Helm Chart + +on: + push: + branches: + - main + +jobs: + release: + permissions: + contents: write + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Configure Git + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + - name: Install Helm + uses: azure/setup-helm@v3 + - name: Run chart-releaser + uses: helm/chart-releaser-action@v1.6.0 + env: + CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + with: + mark_as_latest: false diff --git a/README.md b/README.md index d056674f..451b8146 100644 --- a/README.md +++ b/README.md @@ -135,6 +135,14 @@ docker exec -it portals-web touch database/seeders/tutors.csv docker exec -it portals-web php artisan migrate:fresh --seed ``` +### Kubernetes (Helm) + +You can deploy the application to Kubernetes using the Helm chart. + +See [charts/portals](./charts/portals/) for more information. + +For information about creating the Kubernetes cluster itself, see the [deploy information](./deploy). + ## Authors 👤 **Titus Kirch (main author)** diff --git a/charts/portals/Chart.yaml b/charts/portals/Chart.yaml new file mode 100644 index 00000000..00fa0f23 --- /dev/null +++ b/charts/portals/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: portals +description: Portals is a group allocation tool for the first week of the Department of Electrical Engineering and Information Technology at the FH Aachen - University of Applied Sciences. +type: application +version: 0.0.1 +appVersion: "2.1.0" diff --git a/charts/portals/README.md b/charts/portals/README.md new file mode 100644 index 00000000..e2f21a05 --- /dev/null +++ b/charts/portals/README.md @@ -0,0 +1,44 @@ +# Portals Helm Chart + +This chart deploys the Portals application to a Kubernetes cluster. + +## Install + +You can install the chart with the following command: + +```sh +helm repo add portals https://fsr5-fhaachen.github.io/portals/ +helm upgrade --install portals portals/portals --namespace portals --create-namespace -f values.yaml +``` + +## Values + +You can find the default values in the [values.yaml](values.yaml) file. + +You can override the default values, but a few of them have no usable default and must be set.
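One value you always have to provide yourself is `APP_KEY`, which appears in the list below and must be a valid Laravel application key. A minimal sketch for generating one, assuming a running dev container named `portals-web` as used in the main README:

```sh
# print a fresh application key without writing it to .env
docker exec -it portals-web php artisan key:generate --show
```

Copy the printed value into the `APP_KEY` field of your values file.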
The (minimum) required values are: + +```yaml +environment: + APP_NAME: Erstiwoche FB5 + APP_KEY: # insert app key here + APP_URL: https://portals.fsr5.de + TUTOR_PASSWORD: password # insert secret password here + ADMIN_PASSWORD: admin # insert secret password here + DB_CONNECTION: pgsql + DB_HOST: # insert db host here + DB_PORT: "5432" + DB_DATABASE: postgres + DB_USERNAME: postgres + DB_PASSWORD: # insert db password here + REDIS_HOST: # insert redis host here + REDIS_PASSWORD: # insert redis password here + REDIS_PORT: "6379" +ingress: + enabled: true + className: "nginx" + annotations: + cert-manager.io/issuer: "letsencrypt-prod" + hosts: + - portals.fsr5.de + tls: true +``` diff --git a/charts/portals/templates/_helpers.tpl b/charts/portals/templates/_helpers.tpl new file mode 100644 index 00000000..27d6e7dc --- /dev/null +++ b/charts/portals/templates/_helpers.tpl @@ -0,0 +1,51 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "portals.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "portals.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "portals.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "portals.labels" -}} +helm.sh/chart: {{ include "portals.chart" . }} +{{ include "portals.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "portals.selectorLabels" -}} +app.kubernetes.io/name: {{ include "portals.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/charts/portals/templates/deployment.yaml b/charts/portals/templates/deployment.yaml new file mode 100644 index 00000000..9cf817dd --- /dev/null +++ b/charts/portals/templates/deployment.yaml @@ -0,0 +1,53 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "portals.fullname" . }} + labels: + {{- include "portals.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "portals.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "portals.selectorLabels" . 
| nindent 8 }} + spec: + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + envFrom: + - secretRef: + name: {{ include "portals.fullname" . }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/charts/portals/templates/ingress.yaml b/charts/portals/templates/ingress.yaml new file mode 100644 index 00000000..0239eca6 --- /dev/null +++ b/charts/portals/templates/ingress.yaml @@ -0,0 +1,54 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "portals.fullname" . -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "portals.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + - hosts: + {{- range .Values.ingress.hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ $fullName }}-ingress-tls + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ . | quote }} + http: + paths: + - path: / + pathType: Prefix + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: 8000 + {{- else }} + serviceName: {{ $fullName }} + servicePort: 8000 + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/portals/templates/secret.yaml b/charts/portals/templates/secret.yaml new file mode 100644 index 00000000..e151a35e --- /dev/null +++ b/charts/portals/templates/secret.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "portals.fullname" . }} + labels: + {{- include "portals.labels" . | nindent 4 }} +{{- with .Values.environment }} +stringData: + {{- toYaml . | nindent 2 }} +{{- end }} diff --git a/charts/portals/templates/service.yaml b/charts/portals/templates/service.yaml new file mode 100644 index 00000000..b3471e3d --- /dev/null +++ b/charts/portals/templates/service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "portals.fullname" . }} + labels: + {{- include "portals.labels" . | nindent 4 }} +spec: + ports: + - name: http + port: 8000 + targetPort: http + protocol: TCP + selector: + {{- include "portals.selectorLabels" . 
| nindent 4 }} diff --git a/charts/portals/values.yaml b/charts/portals/values.yaml new file mode 100644 index 00000000..3f09e29d --- /dev/null +++ b/charts/portals/values.yaml @@ -0,0 +1,61 @@ +replicaCount: 1 + +nameOverride: "" +fullnameOverride: "" + +podAnnotations: {} + +image: + repository: ghcr.io/fsr5-fhaachen/portals + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +environment: + APP_NAME: Erstiwoche FB5 + APP_ENV: local + APP_KEY: # insert app key here + APP_DEBUG: "false" + APP_URL: https://portals.fsr5.de + TUTOR_PASSWORD: password # insert secret password here + ADMIN_PASSWORD: admin # insert secret password here + DB_CONNECTION: pgsql + DB_HOST: # insert db host here + DB_PORT: "5432" + DB_DATABASE: postgres + DB_USERNAME: postgres + DB_PASSWORD: # insert db password here + OCTANE_HTTPS: "true" + OCTANE_WORKERS: "4" + OCTANE_MAX_REQUESTS: "512" + WWWGROUP: "1000" + WWWUSER: "1000" + CACHE_DRIVER: redis + SESSION_DRIVER: redis + SESSION_LIFETIME: "120" + REDIS_HOST: # insert redis host here + REDIS_PASSWORD: # insert redis password here + REDIS_PORT: "6379" + +ingress: + enabled: true + className: "nginx" + annotations: + cert-manager.io/issuer: "letsencrypt-prod" + hosts: + - portals.fsr5.de + tls: true + +resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/deploy/README.md b/deploy/README.md new file mode 100644 index 00000000..282d6527 --- /dev/null +++ b/deploy/README.md @@ -0,0 +1,415 @@ +# Deployment of Portals + +This document describes how a Kubernetes cluster can be created and how Portals can be deployed on it. + +> [!NOTE] +> This guide only focuses on the cluster deployment the Fachschaftsrat Elektro- und Informationstechnik uses inside Hetzner Cloud with Cloudflare DNS. It is not a general guide on how to deploy the application. Please see the [README](../README.md) for more information. + +This guide will: +1. [Install prerequisites](#step-0-install-prerequisites) +2. [Setup the Hetzner Cloud project](#step-1-setup-hetzner-cloud) +3. [Create a Management Cluster for Cluster API](#step-2-create-a-management-cluster) +4. [Install Cluster API on the Management Cluster](#step-3-install-cluster-api) +5. [Create a Workload Cluster with Cluster API](#step-4-create-a-workload-cluster) +6. [Deploy Cluster Addons on the Workload Cluster](#step-5-deploy-cluster-addons) +7. [Deploy the Portals Application on the Workload Cluster](#step-6-deploy-portals) + +> [!IMPORTANT] +> The files used in this guide contain placeholders. You need to copy the files and replace the placeholders with your own values/secrets. + +> [!WARNING] +> You need good knowledge of Kubernetes to follow this guide. There will be no explanation of Kubernetes basics. + +## Prerequisites + +To follow this guide you will need: + +* A Linux client to work from (I would suggest using WSL2 on Windows) +* A Hetzner Cloud account with an empty project where the cluster should be deployed +* A Cloudflare account with a domain and a zone for the domain +* A personal SSH key pair (or more keys if you want to use different keys or want to grant access for more people) + +## Step 0: Install prerequisites + +You will need some tools installed on your client to follow this guide. You can install them however you prefer or use the following commands.
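After the install commands below have run, it may help to confirm that each tool actually landed on your `PATH`. A quick sanity check (every command should print a version rather than an error):

```sh
hcloud version
kubectl version --client
helm version
clusterctl version
```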
+ +You will need: +* [hcloud-cli](https://github.com/hetznercloud/cli) +* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/) +* [helm](https://helm.sh/docs/intro/install/) +* [clusterctl](https://cluster-api.sigs.k8s.io/user/quick-start.html#install-clusterctl) + +Optional: +* [homebrew](https://brew.sh/) +* [fzf](https://github.com/junegunn/fzf) +* [kubectx and kubens](https://github.com/ahmetb/kubectx) + +```sh +# updates +sudo apt update +sudo apt upgrade -y +sudo apt install bash-completion -y + +# homebrew +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +(echo; echo 'eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"') >> /home/$USER/.profile +eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)" +cat <<'EOF' >> ~/.profile +if type brew &>/dev/null +then + HOMEBREW_PREFIX="$(brew --prefix)" + if [[ -r "${HOMEBREW_PREFIX}/etc/profile.d/bash_completion.sh" ]] + then + source "${HOMEBREW_PREFIX}/etc/profile.d/bash_completion.sh" + else + for COMPLETION in "${HOMEBREW_PREFIX}/etc/bash_completion.d/"* + do + [[ -r "${COMPLETION}" ]] && source "${COMPLETION}" + done + fi +fi +EOF + +# hcloud-cli +brew install hcloud + +# kubectl +sudo apt-get update +sudo apt-get install -y apt-transport-https ca-certificates curl +curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.28/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg +echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.28/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list +sudo apt-get update +sudo apt-get install -y kubectl +kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl > /dev/null +echo "alias k=kubectl" >> ~/.bashrc +echo "complete -o default -F __start_kubectl k" >> ~/.bashrc +echo "export KUBE_EDITOR=\"nano\"" >> ~/.bashrc + +# helm +curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null +sudo apt-get install apt-transport-https --yes +echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list +sudo apt-get update +sudo apt-get install helm +echo "source <(helm completion bash)" >> ~/.bashrc + +# kubectx and kubens +sudo apt install kubectx +brew install fzf + +# clusterctl +brew install clusterctl +``` + +## Step 1.1: Setup Hetzner Cloud + +### Create API Tokens + +You need to create some API tokens inside the cloud project. You can do this in the Hetzner Cloud Console under `Access > API Tokens`. You will need the following tokens: + +* `cli@@` (used by hcloud-cli on your Linux client) +* `capi@` (used by the hcloud capi controller inside the management cluster) +* `ccm@` (used by the hcloud controller manager inside the cluster) +* `csi@` (used by the hcloud csi driver inside the cluster) + +You can change the names of the tokens to fit your needs. You will need to replace the names in the following commands. + +> [!IMPORTANT] +> Please save the tokens in a safe place. You will need the values in this guide and you will not be able to see them again. + +### Setup hcloud-cli + +You need to set up the hcloud-cli on your Linux client. You can do this by using the following command. You will need to replace the placeholders with your values. + +```sh +hcloud context create # replace context name with a name of your choice (e.g. the hcloud project name) +``` + +The command will ask for the token you have created in the previous step. + + +### Upload SSH Keys + +You need to upload your public SSH key to the cloud project. You can do this in the Hetzner Cloud Console under `Access > SSH Keys` or by using the following command. You can upload multiple keys and reference them later to grant access to more people. + +```sh +hcloud ssh-key create --public-key-from-file ~/.ssh/.pub --name @ +``` + +## Step 1.2: Setup Cloudflare + +### Create API Tokens + +You need to create two API tokens for Cloudflare. You can do this in the Cloudflare Console under `My Profile > API Tokens`. You will need the following tokens: + +* Zone Edit for all needed DNS Zones (for your client) +* Zone Edit for all needed DNS Zones (for cert-manager) + +You can change the names of the tokens to fit your needs. You will need to replace the names in the following commands. + +## Step 2: Create a Management Cluster + +To use Cluster API to create a workload Kubernetes cluster you need to create a management cluster. This cluster will be used to deploy the Cluster API components and to create the workload cluster. + +In this example we will use kind to create the management cluster. + +> [!WARNING] +> Exposing kind clusters to the internet is not recommended and can cause security risks. + +### Create VM + +To create a VM to run kind on you can use the following command: + +```sh +hcloud server create --location nbg1 --image debian-12 --name initial-mgmt-cluster --ssh-key @ --type cx21 +``` + +Wait for the server to be created and then log in to the server with `ssh root@`. + +### Setup VM and create cluster + +Run the following commands on the server to create a kind Kubernetes cluster: + +```sh +# updates +apt update +apt upgrade -y + +# install docker +curl -fsSL https://get.docker.com -o get-docker.sh +sh get-docker.sh + +# install kind +[ $(uname -m) = x86_64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-linux-amd64 +chmod +x ./kind +sudo mv ./kind /usr/local/bin/kind + +# create cluster config. remember to replace the placeholder +cat <<EOF > initial-mgmt-cluster.yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +name: mgmt +networking: + apiServerAddress: "" + apiServerPort: 6443 +nodes: + - role: control-plane + - role: worker +EOF + +# create cluster +kind create cluster --config initial-mgmt-cluster.yaml +``` + +### Gain cluster access + +Run the following commands on your local machine to copy the kubeconfig file from the server: + +```sh +# copy kubeconfig from server to local machine +scp root@:/root/.kube/config ~/.kube/initial-mgmt-cluster.kubeconfig +chmod 600 ~/.kube/initial-mgmt-cluster.kubeconfig + +# set currently used kubeconfig +export KUBECONFIG=~/.kube/initial-mgmt-cluster.kubeconfig + +# test connection +kubectl get nodes +``` + +You now have an exposed kind cluster running on the server. + +## Step 3: Install Cluster API + +### Prepare Management Cluster + +Before installing Cluster API you need to apply a workaround for the container images: pre-pulling the controller images and loading them into the kind nodes avoids image pull problems during the installation.
+ +Log in again to the management cluster server with `ssh root@` and run the following commands: + +```sh +docker pull registry.k8s.io/cluster-api/kubeadm-bootstrap-controller: +kind load docker-image -n mgmt registry.k8s.io/cluster-api/kubeadm-bootstrap-controller: +docker pull registry.k8s.io/cluster-api/kubeadm-control-plane-controller: +kind load docker-image -n mgmt registry.k8s.io/cluster-api/kubeadm-control-plane-controller: +docker pull registry.k8s.io/cluster-api/cluster-api-controller: +kind load docker-image -n mgmt registry.k8s.io/cluster-api/cluster-api-controller: +``` + +### Install Cluster API + +With the following command you will install Cluster API on the management cluster. + +> [!IMPORTANT] +> Make sure that you have selected the right Kubernetes cluster (maybe check with `kubectx`) + +```sh +clusterctl init --core cluster-api --bootstrap kubeadm --control-plane kubeadm --infrastructure hetzner +``` + +You can check if the installation was successful by running `kubectl get pods -A`. You should see pods in the `caph-system`, `capi-system`, `capi-kubeadm-bootstrap-system` and `capi-kubeadm-control-plane-system` namespaces. + +## Step 4: Create a Workload Cluster + +### Create Cluster + +Run the following commands to create a workload cluster: + +> [!NOTE] +> Some values need to be inserted base64-encoded. You can use `echo -n "" | base64 -w 0` to encode them. + +```sh +# replace placeholders before applying +kubectl apply -f cluster/ +``` + +### Create Infrastructure beneath Cluster + +After the `HetznerCluster` object is ready (you can verify this with `k get hetznercluster `) you have to run the following commands: + +```sh +# create nat gateway +hcloud network add-route --destination 0.0.0.0/0 --gateway 10.0.255.254 +hcloud server create --location fsn1 --image debian-11 --name -nat-gateway --placement-group -gw --ssh-key @ --type cx11 --user-data-from-file ./nat-gateway/cloud-config.yaml +hcloud server attach-to-network -n --ip 10.0.255.254 -nat-gateway + +# create dns records +curl --request POST --url https://api.cloudflare.com/client/v4/zones//dns_records --header 'Content-Type: application/json' --header 'Authorization: Bearer ' --data '{"content": "", "name": "", "proxied": false, "type": "A", "comment": "Kubernetes API", "tags": [], "ttl": 1}' +``` + +### Get cluster access + +To get cluster access you can run the following commands: + +```sh +# get kubeconfig +kubectl get secret -n -kubeconfig -o jsonpath='{.data.value}' | base64 -d > ~/.kube/.kubeconfig +chmod 600 ~/.kube/.kubeconfig + +# set currently used kubeconfig +export KUBECONFIG=~/.kube/.kubeconfig +``` + +### Deploy CNI and CCM + +To finish the cluster setup you need to deploy the CNI (container network interface) and the CCM (cloud controller manager). You can do this by running the following commands: + +```sh +# cilium (cni) +helm repo add cilium https://helm.cilium.io/ +helm upgrade --install cilium cilium/cilium --namespace cilium-system --create-namespace -f deployments/addons/cilium-values.yaml # remember to replace the placeholders + +# ccm +kubectl create ns hcloud-system +kubectl apply -f deployments/addons/ccm-secret.yaml # remember to replace the placeholders +helm repo add hcloud https://charts.hetzner.cloud +helm upgrade --install ccm hcloud/hcloud-cloud-controller-manager -n hcloud-system -f deployments/addons/ccm-values.yaml +``` + +### Wait for Cluster to be ready + +After deploying the CNI and CCM you have to wait for all nodes to come up; a scripted way to wait is sketched below.
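If you would rather have this step block in a script than watch it interactively, `kubectl wait` can poll for node readiness; a minimal sketch:

```sh
# block until every node reports Ready, or give up after 10 minutes
kubectl wait --for=condition=Ready node --all --timeout=600s
```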
You can watch the process with `watch kubectl get nodes,pods -A`. + +## Step 5: Deploy Cluster Addons + +In this step you will deploy the addons to the cluster. You can do this by running the following commands. + +This will install: +* hcloud csi (container storage interface) +* metrics server +* nginx ingress +* cert-manager +* postgresql cluster +* redis cluster +* monitoring +* logging + +```sh +# csi (container storage interface) +kubectl apply -f deployments/addons/csi-secret.yaml +kubectl apply -f deployments/addons/csi-2.7.0.yaml + +# metrics server +helm repo add metrics-server https://kubernetes-sigs.github.io/metrics-server/ +helm upgrade --install metrics-server metrics-server/metrics-server --namespace kube-system + +# ingress +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm upgrade --install ingress-nginx ingress-nginx/ingress-nginx --namespace ingress-nginx --create-namespace -f deployments/addons/ingress-nginx-values.yaml + +# create dns records for ingress +curl --request POST --url https://api.cloudflare.com/client/v4/zones//dns_records --header 'Content-Type: application/json' --header 'Authorization: Bearer ' --data '{"content": "$SERVER_IP_INGRESS_LOADBALANCER", "name": "", "proxied": false, "type": "A", "comment": "Kubernetes Cluster Ingress", "tags": [], "ttl": 1}' +curl --request POST --url https://api.cloudflare.com/client/v4/zones//dns_records --header 'Content-Type: application/json' --header 'Authorization: Bearer ' --data '{"content": "", "name": "*..", "proxied": false, "type": "CNAME", "comment": "Kubernetes Cluster Ingress", "tags": [], "ttl": 1}' + +# cert-manager +helm repo add jetstack https://charts.jetstack.io +helm upgrade --install cert-manager jetstack/cert-manager --namespace cert-manager-system --create-namespace -f deployments/addons/cert-manager-values.yaml +kubectl apply -f deployments/addons/cert-manager-issuer.yaml + +# postgresql operator +helm repo add cnpg https://cloudnative-pg.github.io/charts +helm upgrade --install cnpg cnpg/cloudnative-pg --namespace postgresql-system --create-namespace + +# redis operator +helm repo add ot-helm https://ot-container-kit.github.io/helm-charts/ +helm upgrade --install redis-operator ot-helm/redis-operator --namespace redis-system --create-namespace +``` + + + + + + + + + + +## Step 6: Deploy Portals + +### Deploy Portals + +In this step you will deploy the Portals application, a PostgreSQL database and a Redis cluster. You can do this by running the following commands. + +Remember to replace the placeholders in the values files with your values. + +```sh +# create namespace +kubectl create namespace portals + +# postgresql cluster +kubectl apply -f deployments/portals/pgsql.yaml + +# redis cluster +kubectl apply -f deployments/portals/redis-pw-secret.yaml +helm repo add ot-helm https://ot-container-kit.github.io/helm-charts/ +helm upgrade --install portals-redis ot-helm/redis-cluster --namespace portals -f deployments/portals/redis-values.yaml + +# portals +helm repo add portals https://fsr5-fhaachen.github.io/portals/ +helm upgrade --install portals portals/portals --namespace portals -f deployments/portals/portals-values.yaml +``` + +### Setup Portals + +To fully set up Portals you need to seed the database. You can do this by executing the following command in one of the Portals pods. You can exec into the pod with `kubectl exec -it -n portals -- sh`; a non-interactive alternative is sketched below.
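Alternatively, the database can be seeded without opening a shell. A sketch, assuming the Helm release is named `portals`, in which case the chart's fullname helper also names the Deployment `portals`:

```sh
# run the seeder in one pod of the portals Deployment
kubectl exec -n portals deploy/portals -- php artisan migrate:fresh --seed
```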
+ +If you chose the interactive shell, switch into the pod and execute the following command to seed the database: + +```sh +php artisan migrate:fresh --seed +``` + +### Setup DNS + +To set up the DNS records for Portals you need to create a DNS record for the ingress. You can do this by running the following commands: + +```sh +# wildcard record for ingress +curl --request POST --url https://api.cloudflare.com/client/v4/zones//dns_records --header 'Content-Type: application/json' --header 'Authorization: Bearer ' --data '{"content": "", "name": "*.", "proxied": false, "type": "A", "comment": "Kubernetes Ingress", "tags": [], "ttl": 1}' + +# record for portals (only if not inside ingress wildcard) +curl --request POST --url https://api.cloudflare.com/client/v4/zones//dns_records --header 'Content-Type: application/json' --header 'Authorization: Bearer ' --data '{"content": "", "name": "", "proxied": false, "type": "CNAME", "comment": "Kubernetes Ingress Portals", "tags": [], "ttl": 1}' +``` + +Done! You can now connect to Portals at your configured URL. diff --git a/deploy/cluster/cluster.yaml b/deploy/cluster/cluster.yaml new file mode 100644 index 00000000..e3a23628 --- /dev/null +++ b/deploy/cluster/cluster.yaml @@ -0,0 +1,25 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: + namespace: +spec: + controlPlaneEndpoint: + host: + port: 6443 + clusterNetwork: + pods: + cidrBlocks: + - 10.10.0.0/16 + services: + cidrBlocks: + - 10.20.0.0/16 + serviceDomain: cluster.local + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: -ctl + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: HetznerCluster + name: diff --git a/deploy/cluster/hetznercluster.yaml b/deploy/cluster/hetznercluster.yaml new file mode 100644 index 00000000..eea0c567 --- /dev/null +++ b/deploy/cluster/hetznercluster.yaml @@ -0,0 +1,37 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: HetznerCluster +metadata: + name: + namespace: +spec: + controlPlaneEndpoint: + host: + port: 6443 + controlPlaneLoadBalancer: + enabled: true + region: nbg1 + type: lb11 + name: + controlPlaneRegions: + - fsn1 + - nbg1 + - hel1 + hcloudNetwork: + enabled: true + networkZone: eu-central + cidrBlock: 10.0.0.0/8 + subnetCidrBlock: 10.0.0.0/16 + hcloudPlacementGroups: + - name: ctl + type: spread + - name: md-0 + type: spread + - name: gw + type: spread + hetznerSecretRef: + name: caph-hcloud-apitoken- + key: + hcloudToken: token + sshKeys: + hcloud: + - name: @ diff --git a/deploy/cluster/kubeadm-cfg-tmpl.yaml b/deploy/cluster/kubeadm-cfg-tmpl.yaml new file mode 100644 index 00000000..4af9430e --- /dev/null +++ b/deploy/cluster/kubeadm-cfg-tmpl.yaml @@ -0,0 +1,119 @@ +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: -md-0 + namespace: +spec: + template: + spec: + files: + - content: | + net.ipv4.conf.lxc*.rp_filter = 0 + owner: root:root + path: /etc/sysctl.d/99-cilium.conf + permissions: "0744" + - content: | + overlay + br_netfilter + owner: root:root + path: /etc/modules-load.d/crio.conf + permissions: "0744" + - content: | + version = 2 + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.crun] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.crun.options] +
BinaryName = "crun" + Root = "/usr/local/sbin" + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".containerd] + default_runtime_name = "crun" + [plugins."io.containerd.runtime.v1.linux"] + runtime = "crun" + runtime_root = "/usr/local/sbin" + owner: root:root + path: /etc/containerd/config.toml + permissions: "0744" + - content: | + net.bridge.bridge-nf-call-iptables = 1 + net.bridge.bridge-nf-call-ip6tables = 1 + net.ipv4.ip_forward = 1 + owner: root:root + path: /etc/sysctl.d/99-kubernetes-cri.conf + permissions: "0744" + - content: | + vm.overcommit_memory=1 + kernel.panic=10 + kernel.panic_on_oops=1 + owner: root:root + path: /etc/sysctl.d/99-kubelet.conf + permissions: "0744" + - content: | + nameserver 1.1.1.1 + nameserver 1.0.0.1 + nameserver 2606:4700:4700::1111 + owner: root:root + path: /etc/kubernetes/resolv.conf + permissions: "0744" + joinConfiguration: + nodeRegistration: + criSocket: unix:///run/containerd/containerd.sock + kubeletExtraArgs: + kubeconfig: /etc/kubernetes/kubelet.kubeconfig + anonymous-auth: "false" + rotate-server-certificates: "true" + authentication-token-webhook: "true" + authorization-mode: Webhook + event-qps: "5" + max-pods: "120" + read-only-port: "0" + cloud-provider: external + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + preKubeadmCommands: + # setup gw + - echo "nameserver 1.1.1.1" >> /etc/resolvconf/resolv.conf.d/head + - echo "nameserver 1.0.0.1" >> /etc/resolvconf/resolv.conf.d/head + - resolvconf --enable-updates + - resolvconf -u + - ip route add default via 10.0.0.1 + - echo "10.0.0.2 " >> /etc/hosts + - while ! 
ping -c 1 google.de > /dev/null 2>&1; do sleep 5; done + # init node + - set -x + - export CRUN=1.8.5 + - export CONTAINERD=1.7.2 + - export KUBERNETES_VERSION= + - ARCH=amd64 + - if [ "$(uname -m)" = "aarch64" ]; then ARCH=arm64; fi + - localectl set-locale LANG=en_US.UTF-8 + - localectl set-locale LANGUAGE=en_US.UTF-8 + - apt-get update -y + # own + - apt install apparmor apparmor-utils -y + # init node + - apt-get -y install at jq unzip wget socat mtr logrotate apt-transport-https + - sed -i '/swap/d' /etc/fstab + - swapoff -a + - modprobe overlay && modprobe br_netfilter && sysctl --system + - wget https://github.com/containerd/containerd/releases/download/v$CONTAINERD/cri-containerd-cni-$CONTAINERD-linux-$ARCH.tar.gz + - wget https://github.com/containerd/containerd/releases/download/v$CONTAINERD/cri-containerd-cni-$CONTAINERD-linux-$ARCH.tar.gz.sha256sum + - sha256sum --check cri-containerd-cni-$CONTAINERD-linux-$ARCH.tar.gz.sha256sum + - tar --no-overwrite-dir -C / -xzf cri-containerd-cni-$CONTAINERD-linux-$ARCH.tar.gz + - rm -f cri-containerd-cni-$CONTAINERD-linux-$ARCH.tar.gz cri-containerd-cni-$CONTAINERD-linux-$ARCH.tar.gz.sha256sum + - wget https://github.com/containers/crun/releases/download/$CRUN/crun-$CRUN-linux-$ARCH -O /usr/local/sbin/crun && chmod +x /usr/local/sbin/crun + - rm -f /etc/cni/net.d/10-containerd-net.conflist + - chmod -R 644 /etc/cni && chown -R root:root /etc/cni + - systemctl daemon-reload && systemctl enable containerd && systemctl start containerd + - curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - + - echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list + - apt-get update + - apt-get install -y kubelet=$KUBERNETES_VERSION-00 kubeadm=$KUBERNETES_VERSION-00 kubectl=$KUBERNETES_VERSION-00 bash-completion && apt-mark hold kubelet kubectl kubeadm && systemctl enable kubelet + - kubeadm config images pull --kubernetes-version $KUBERNETES_VERSION + - echo 'source <(kubectl completion bash)' >>~/.bashrc + - echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >>~/.bashrc + - apt-get -y autoremove && apt-get -y clean all + postKubeadmCommands: [] diff --git a/deploy/cluster/kubeadm-ctl.yaml b/deploy/cluster/kubeadm-ctl.yaml new file mode 100644 index 00000000..e1f5c110 --- /dev/null +++ b/deploy/cluster/kubeadm-ctl.yaml @@ -0,0 +1,216 @@ +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: -ctl + namespace: +spec: + kubeadmConfigSpec: + clusterConfiguration: + clusterName: + controlPlaneEndpoint: :6443 + apiServer: + certSANs: + - + extraArgs: + cloud-provider: external + authorization-mode: Node,RBAC + client-ca-file: /etc/kubernetes/pki/ca.crt + default-not-ready-toleration-seconds: "45" + default-unreachable-toleration-seconds: "45" + enable-aggregator-routing: "true" + enable-bootstrap-token-auth: "true" + etcd-cafile: /etc/kubernetes/pki/etcd/ca.crt + etcd-certfile: /etc/kubernetes/pki/etcd/server.crt + etcd-keyfile: /etc/kubernetes/pki/etcd/server.key + kubelet-client-certificate: /etc/kubernetes/pki/apiserver-kubelet-client.crt + kubelet-client-key: /etc/kubernetes/pki/apiserver-kubelet-client.key + kubelet-preferred-address-types: InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP + profiling: "false" + proxy-client-cert-file: /etc/kubernetes/pki/front-proxy-client.crt + proxy-client-key-file: /etc/kubernetes/pki/front-proxy-client.key + requestheader-allowed-names: front-proxy-client + 
requestheader-client-ca-file: /etc/kubernetes/pki/front-proxy-ca.crt + requestheader-extra-headers-prefix: X-Remote-Extra- + requestheader-group-headers: X-Remote-Group + requestheader-username-headers: X-Remote-User + service-account-key-file: /etc/kubernetes/pki/sa.pub + service-account-lookup: "true" + tls-cert-file: /etc/kubernetes/pki/apiserver.crt + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + tls-private-key-file: /etc/kubernetes/pki/apiserver.key + enable-admission-plugins: DenyServiceExternalIPs + controllerManager: + extraArgs: + allocate-node-cidrs: "true" + authentication-kubeconfig: /etc/kubernetes/controller-manager.conf + authorization-kubeconfig: /etc/kubernetes/controller-manager.conf + bind-address: 0.0.0.0 + cloud-provider: external + cluster-signing-cert-file: /etc/kubernetes/pki/ca.crt + cluster-signing-duration: 6h0m0s + cluster-signing-key-file: /etc/kubernetes/pki/ca.key + kubeconfig: /etc/kubernetes/controller-manager.conf + profiling: "false" + requestheader-client-ca-file: /etc/kubernetes/pki/front-proxy-ca.crt + root-ca-file: /etc/kubernetes/pki/ca.crt + secure-port: "10257" + service-account-private-key-file: /etc/kubernetes/pki/sa.key + terminated-pod-gc-threshold: "10" + use-service-account-credentials: "true" + etcd: + local: + dataDir: /var/lib/etcd + extraArgs: + auto-tls: "false" + cert-file: /etc/kubernetes/pki/etcd/server.crt + cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + client-cert-auth: "true" + key-file: /etc/kubernetes/pki/etcd/server.key + peer-auto-tls: "false" + peer-client-cert-auth: "true" + trusted-ca-file: /etc/kubernetes/pki/etcd/ca.crt + listen-metrics-urls: http://0.0.0.0:2381 + scheduler: + extraArgs: + bind-address: 0.0.0.0 + kubeconfig: /etc/kubernetes/scheduler.conf + profiling: "false" + secure-port: "10259" + initConfiguration: + skipPhases: + - addon/kube-proxy + nodeRegistration: + criSocket: unix:///run/containerd/containerd.sock + kubeletExtraArgs: + anonymous-auth: "false" + authentication-token-webhook: "true" + authorization-mode: Webhook + cloud-provider: external + event-qps: "5" + kubeconfig: /etc/kubernetes/kubelet.conf + max-pods: "120" + read-only-port: "0" + rotate-server-certificates: "true" + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + joinConfiguration: + nodeRegistration: + criSocket: unix:///run/containerd/containerd.sock + kubeletExtraArgs: + anonymous-auth: "false" + authentication-token-webhook: "true" + authorization-mode: Webhook + cloud-provider: external + event-qps: "5" + kubeconfig: /etc/kubernetes/kubelet.conf + max-pods: "120" + read-only-port: "0" + rotate-server-certificates: "true" + tls-cipher-suites: 
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + net.ipv4.conf.lxc*.rp_filter = 0 + owner: root:root + path: /etc/sysctl.d/99-cilium.conf + permissions: "0744" + - content: | + overlay + br_netfilter + owner: root:root + path: /etc/modules-load.d/crio.conf + permissions: "0744" + - content: | + version = 2 + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.crun] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.crun.options] + BinaryName = "crun" + Root = "/usr/local/sbin" + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".containerd] + default_runtime_name = "crun" + [plugins."io.containerd.runtime.v1.linux"] + runtime = "crun" + runtime_root = "/usr/local/sbin" + owner: root:root + path: /etc/containerd/config.toml + permissions: "0744" + - content: | + net.bridge.bridge-nf-call-iptables = 1 + net.bridge.bridge-nf-call-ip6tables = 1 + net.ipv4.ip_forward = 1 + owner: root:root + path: /etc/sysctl.d/99-kubernetes-cri.conf + permissions: "0744" + - content: | + vm.overcommit_memory=1 + kernel.panic=10 + kernel.panic_on_oops=1 + owner: root:root + path: /etc/sysctl.d/99-kubelet.conf + permissions: "0744" + - content: | + nameserver 1.1.1.1 + nameserver 1.0.0.1 + nameserver 2606:4700:4700::1111 + owner: root:root + path: /etc/kubernetes/resolv.conf + permissions: "0744" + preKubeadmCommands: + # setup gw + - echo "nameserver 1.1.1.1" >> /etc/resolvconf/resolv.conf.d/head + - echo "nameserver 1.0.0.1" >> /etc/resolvconf/resolv.conf.d/head + - resolvconf --enable-updates + - resolvconf -u + - ip route add default via 10.0.0.1 + - echo "10.0.0.2 " >> /etc/hosts + - while ! 
ping -c 1 google.de > /dev/null 2>&1; do sleep 5; done + # init node + - set -x + - export CRUN=1.8.5 + - export CONTAINERD=1.7.2 + - export KUBERNETES_VERSION= + - localectl set-locale LANG=en_US.UTF-8 + - localectl set-locale LANGUAGE=en_US.UTF-8 + - apt-get update -y + # own + - apt install apparmor apparmor-utils -y + # init node + - apt-get -y install at jq unzip wget socat mtr logrotate apt-transport-https + - sed -i '/swap/d' /etc/fstab + - swapoff -a + - modprobe overlay && modprobe br_netfilter && sysctl --system + - wget https://github.com/containerd/containerd/releases/download/v$CONTAINERD/cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz + - wget https://github.com/containerd/containerd/releases/download/v$CONTAINERD/cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz.sha256sum + - sha256sum --check cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz.sha256sum + - tar --no-overwrite-dir -C / -xzf cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz + - rm -f cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz.sha256sum + - wget https://github.com/containers/crun/releases/download/$CRUN/crun-$CRUN-linux-amd64 -O /usr/local/sbin/crun && chmod +x /usr/local/sbin/crun + - rm -f /etc/cni/net.d/10-containerd-net.conflist + - chmod -R 644 /etc/cni && chown -R root:root /etc/cni + - systemctl daemon-reload && systemctl enable containerd && systemctl start containerd + - curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - + - echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list + - apt-get update + - apt-get install -y kubelet=$KUBERNETES_VERSION-00 kubeadm=$KUBERNETES_VERSION-00 kubectl=$KUBERNETES_VERSION-00 bash-completion && apt-mark hold kubelet kubectl kubeadm && systemctl enable kubelet + - kubeadm config images pull --kubernetes-version $KUBERNETES_VERSION + - echo 'source <(kubectl completion bash)' >>~/.bashrc + - echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >>~/.bashrc + - apt-get -y autoremove && apt-get -y clean all + postKubeadmCommands: [] + rolloutBefore: + certificatesExpiryDays: 30 + rolloutStrategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: HCloudMachineTemplate + name: -ctl + replicas: 3 + version: diff --git a/deploy/cluster/machine-tmpl.yaml b/deploy/cluster/machine-tmpl.yaml new file mode 100644 index 00000000..4aefd5a4 --- /dev/null +++ b/deploy/cluster/machine-tmpl.yaml @@ -0,0 +1,29 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: HCloudMachineTemplate +metadata: + name: -ctl + namespace: +spec: + template: + spec: + type: cx21 + imageName: debian-12 + placementGroupName: ctl + publicNetwork: + enableIPv4: false + enableIPv6: false +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: HCloudMachineTemplate +metadata: + name: -md-0 + namespace: +spec: + template: + spec: + type: cx21 + imageName: debian-12 + placementGroupName: md-0 + publicNetwork: + enableIPv4: false + enableIPv6: false diff --git a/deploy/cluster/md.yaml b/deploy/cluster/md.yaml new file mode 100644 index 00000000..a4128d19 --- /dev/null +++ b/deploy/cluster/md.yaml @@ -0,0 +1,74 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: -md-0-fsn1 + namespace: +spec: + clusterName: + replicas: 1 + selector: + matchLabels: null + template: + spec: + bootstrap: + configRef: + apiVersion: 
bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: -md-0 + clusterName: + failureDomain: fsn1 + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: HCloudMachineTemplate + name: -md-0 + version: +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: -md-0-nbg1 + namespace: +spec: + clusterName: + replicas: 1 + selector: + matchLabels: null + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: -md-0 + clusterName: + failureDomain: nbg1 + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: HCloudMachineTemplate + name: -md-0 + version: +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: -md-0-hel1 + namespace: +spec: + clusterName: + replicas: 1 + selector: + matchLabels: null + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: -md-0 + clusterName: + failureDomain: hel1 + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: HCloudMachineTemplate + name: -md-0 + version: diff --git a/deploy/cluster/secret.yaml b/deploy/cluster/secret.yaml new file mode 100644 index 00000000..e6f003ef --- /dev/null +++ b/deploy/cluster/secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: caph-hcloud-apitoken- + namespace: + labels: + clusterctl.cluster.x-k8s.io/move: '' +data: + token: diff --git a/deploy/deployments/addons/ccm-secret.yaml b/deploy/deployments/addons/ccm-secret.yaml new file mode 100644 index 00000000..fa757e6f --- /dev/null +++ b/deploy/deployments/addons/ccm-secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: ccm + namespace: hcloud-system +data: + token: + network: # you can get the id from the url in the web interface diff --git a/deploy/deployments/addons/ccm-values.yaml b/deploy/deployments/addons/ccm-values.yaml new file mode 100644 index 00000000..1d8fa253 --- /dev/null +++ b/deploy/deployments/addons/ccm-values.yaml @@ -0,0 +1,24 @@ +env: + HCLOUD_TOKEN: + valueFrom: + secretKeyRef: + name: ccm + key: token + NODE_NAME: + valueFrom: + fieldRef: + fieldPath: spec.nodeName + +# wait for new hcloud ccm release before uncommenting +#replicaCount: 3 +args: + leader-elect: "true" + +networking: + enabled: true + clusterCIDR: 10.10.0.0/16 + network: + valueFrom: + secretKeyRef: + name: ccm + key: network diff --git a/deploy/deployments/addons/cert-manager-issuer.yaml b/deploy/deployments/addons/cert-manager-issuer.yaml new file mode 100644 index 00000000..02bbeba7 --- /dev/null +++ b/deploy/deployments/addons/cert-manager-issuer.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cert-manager-cloudflare-api-token + namespace: cert-manager-system +type: Opaque +data: + token: +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: letsencrypt-prod + namespace: cert-manager-system +spec: + acme: + server: https://acme-v02.api.letsencrypt.org/directory + email: + privateKeySecretRef: + name: cert-manager-issuer-private-key-letsencrypt-prod + solvers: + - dns01: + cloudflare: + apiTokenSecretRef: + name: cert-manager-cloudflare-api-token + key: token diff --git a/deploy/deployments/addons/cert-manager-values.yaml b/deploy/deployments/addons/cert-manager-values.yaml new file mode 100644 index 00000000..1b4551cc --- /dev/null +++ b/deploy/deployments/addons/cert-manager-values.yaml @@ 
-0,0 +1 @@ +installCRDs: true diff --git a/deploy/deployments/addons/cilium-values.yaml b/deploy/deployments/addons/cilium-values.yaml new file mode 100644 index 00000000..ebac56cf --- /dev/null +++ b/deploy/deployments/addons/cilium-values.yaml @@ -0,0 +1,43 @@ +rollOutCiliumPods: true +priorityClassName: "system-node-critical" + +hubble: + metrics: + enabled: + - dns:query;ignoreAAAA + - drop + - tcp + - flow + - icmp + - http + relay: + enabled: true + rollOutPods: true + ui: + enabled: true + rollOutPods: true +ipam: + mode: "kubernetes" + +kubeProxyReplacement: strict +k8sServiceHost: +k8sServicePort: 6443 + +hostServices: + enabled: false + +externalIPs: + enabled: true + +nodePort: + enabled: true + +hostPort: + enabled: true + +image: + pullPolicy: IfNotPresent + +operator: + rollOutPods: true + priorityClassName: "system-node-critical" diff --git a/deploy/deployments/addons/csi-2.7.0.yaml b/deploy/deployments/addons/csi-2.7.0.yaml new file mode 100644 index 00000000..e38714d6 --- /dev/null +++ b/deploy/deployments/addons/csi-2.7.0.yaml @@ -0,0 +1,393 @@ +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" + name: hcloud-volumes +provisioner: csi.hetzner.cloud +volumeBindingMode: WaitForFirstConsumer +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hcloud-csi-controller + namespace: hcloud-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: hcloud-csi-controller +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - csi.storage.k8s.io + resources: + - csinodeinfos + verbs: + - get + - list + - watch +- apiGroups: + - storage.k8s.io + resources: + - csinodes + verbs: + - get + - list + - watch +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - create + - delete + - patch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - persistentvolumeclaims/status + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - get + - list +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create + - update + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: hcloud-csi-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hcloud-csi-controller +subjects: +- kind: ServiceAccount + name: hcloud-csi-controller + namespace: hcloud-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: hcloud-csi-controller + name: hcloud-csi-controller-metrics + 
namespace: hcloud-system +spec: + ports: + - name: metrics + port: 9189 + targetPort: metrics + selector: + app: hcloud-csi-controller +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: hcloud-csi + name: hcloud-csi-node-metrics + namespace: hcloud-system +spec: + ports: + - name: metrics + port: 9189 + targetPort: metrics + selector: + app: hcloud-csi +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hcloud-csi-controller + namespace: hcloud-system +spec: + replicas: 1 + selector: + matchLabels: + app: hcloud-csi-controller + template: + metadata: + labels: + app: hcloud-csi-controller + spec: + containers: + - args: + - --default-fstype=ext4 + image: registry.k8s.io/sig-storage/csi-attacher:v4.1.0 + name: csi-attacher + volumeMounts: + - mountPath: /run/csi + name: socket-dir + - image: registry.k8s.io/sig-storage/csi-resizer:v1.7.0 + name: csi-resizer + volumeMounts: + - mountPath: /run/csi + name: socket-dir + - args: + - --feature-gates=Topology=true + - --default-fstype=ext4 + image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0 + name: csi-provisioner + volumeMounts: + - mountPath: /run/csi + name: socket-dir + - command: + - /bin/hcloud-csi-driver-controller + env: + - name: CSI_ENDPOINT + value: unix:///run/csi/socket + - name: METRICS_ENDPOINT + value: 0.0.0.0:9189 + - name: ENABLE_METRICS + value: "true" + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: HCLOUD_TOKEN + valueFrom: + secretKeyRef: + key: token + name: csi + image: hetznercloud/hcloud-csi-driver:latest + imagePullPolicy: Always + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 2 + timeoutSeconds: 3 + name: hcloud-csi-driver + ports: + - containerPort: 9189 + name: metrics + - containerPort: 9808 + name: healthz + protocol: TCP + volumeMounts: + - mountPath: /run/csi + name: socket-dir + - image: registry.k8s.io/sig-storage/livenessprobe:v2.9.0 + imagePullPolicy: Always + name: liveness-probe + volumeMounts: + - mountPath: /run/csi + name: socket-dir + serviceAccountName: hcloud-csi-controller + volumes: + - emptyDir: {} + name: socket-dir +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: hcloud-csi + name: hcloud-csi-node + namespace: hcloud-system +spec: + selector: + matchLabels: + app: hcloud-csi + template: + metadata: + labels: + app: hcloud-csi + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: instance.hetzner.cloud/is-root-server + operator: NotIn + values: + - "true" + containers: + - args: + - --kubelet-registration-path=/var/lib/kubelet/plugins/csi.hetzner.cloud/socket + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0 + name: csi-node-driver-registrar + volumeMounts: + - mountPath: /run/csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - command: + - /bin/hcloud-csi-driver-node + env: + - name: CSI_ENDPOINT + value: unix:///run/csi/socket + - name: METRICS_ENDPOINT + value: 0.0.0.0:9189 + - name: ENABLE_METRICS + value: "true" + image: hetznercloud/hcloud-csi-driver:latest + imagePullPolicy: Always + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 2 + timeoutSeconds: 3 + name: hcloud-csi-driver + ports: + - containerPort: 9189 + name: metrics + - containerPort: 9808 + name: healthz + protocol: TCP + securityContext: + 
+          privileged: true
+        volumeMounts:
+        - mountPath: /var/lib/kubelet
+          mountPropagation: Bidirectional
+          name: kubelet-dir
+        - mountPath: /run/csi
+          name: plugin-dir
+        - mountPath: /dev
+          name: device-dir
+      - image: registry.k8s.io/sig-storage/livenessprobe:v2.9.0
+        imagePullPolicy: Always
+        name: liveness-probe
+        volumeMounts:
+        - mountPath: /run/csi
+          name: plugin-dir
+      tolerations:
+      - effect: NoExecute
+        operator: Exists
+      - effect: NoSchedule
+        operator: Exists
+      - key: CriticalAddonsOnly
+        operator: Exists
+      volumes:
+      - hostPath:
+          path: /var/lib/kubelet
+          type: Directory
+        name: kubelet-dir
+      - hostPath:
+          path: /var/lib/kubelet/plugins/csi.hetzner.cloud/
+          type: DirectoryOrCreate
+        name: plugin-dir
+      - hostPath:
+          path: /var/lib/kubelet/plugins_registry/
+          type: Directory
+        name: registration-dir
+      - hostPath:
+          path: /dev
+          type: Directory
+        name: device-dir
+---
+apiVersion: storage.k8s.io/v1
+kind: CSIDriver
+metadata:
+  name: csi.hetzner.cloud
+spec:
+  attachRequired: true
+  fsGroupPolicy: File
+  podInfoOnMount: true
+  volumeLifecycleModes:
+  - Persistent
diff --git a/deploy/deployments/addons/csi-secret.yaml b/deploy/deployments/addons/csi-secret.yaml
new file mode 100644
index 00000000..d082b645
--- /dev/null
+++ b/deploy/deployments/addons/csi-secret.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: csi
+  namespace: hcloud-system
+data:
+  token: # base64-encoded Hetzner Cloud API token (fill in before applying)
diff --git a/deploy/deployments/addons/ingress-nginx-values.yaml b/deploy/deployments/addons/ingress-nginx-values.yaml
new file mode 100644
index 00000000..45c6e6ea
--- /dev/null
+++ b/deploy/deployments/addons/ingress-nginx-values.yaml
@@ -0,0 +1,10 @@
+controller:
+  config:
+    use-proxy-protocol: "true"
+  replicaCount: 2
+  service:
+    annotations:
+      load-balancer.hetzner.cloud/location: fsn1
+      load-balancer.hetzner.cloud/use-private-ip: "true"
+      load-balancer.hetzner.cloud/name: # name of the load balancer to create (fill in)
+      load-balancer.hetzner.cloud/uses-proxyprotocol: "true"
diff --git a/deploy/deployments/portals/pgsql.yaml b/deploy/deployments/portals/pgsql.yaml
new file mode 100644
index 00000000..15e1774c
--- /dev/null
+++ b/deploy/deployments/portals/pgsql.yaml
@@ -0,0 +1,10 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: portals-db
+  namespace: portals
+spec:
+  instances: 3
+  primaryUpdateStrategy: unsupervised
+  storage:
+    size: 10Gi
diff --git a/deploy/deployments/portals/portals-values.yaml b/deploy/deployments/portals/portals-values.yaml
new file mode 100644
index 00000000..3b616c42
--- /dev/null
+++ b/deploy/deployments/portals/portals-values.yaml
@@ -0,0 +1,14 @@
+environment:
+  APP_KEY: # application key (fill in)
+  APP_URL: https://portals.fsr5.de
+  APP_DEBUG: "false"
+  TUTOR_PASSWORD: password
+  ADMIN_PASSWORD: admin
+  DB_HOST: portals-db-rw.portals.svc
+  DB_PASSWORD: # database password (fill in)
+  REDIS_HOST: portals-redis.portals.svc
+  REDIS_PASSWORD: password
+ingress:
+  hosts:
+    - portals.fsr5.de
+replicaCount: 3
diff --git a/deploy/deployments/portals/redis-pw-secret.yaml b/deploy/deployments/portals/redis-pw-secret.yaml
new file mode 100644
index 00000000..c37c8ba3
--- /dev/null
+++ b/deploy/deployments/portals/redis-pw-secret.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: redis-password
+  namespace: portals
+data:
+  password: cGFzc3dvcmQ= # change the password here if needed (base64-encoded); the default is "password"
default is "password" diff --git a/deploy/deployments/portals/redis-values.yaml b/deploy/deployments/portals/redis-values.yaml new file mode 100644 index 00000000..5e9ba4e9 --- /dev/null +++ b/deploy/deployments/portals/redis-values.yaml @@ -0,0 +1,5 @@ +redisCluster: + persistenceEnabled: false + redisSecret: + secretName: redis-password + secretKey: password diff --git a/deploy/nat-gateway/cloud-config.yaml b/deploy/nat-gateway/cloud-config.yaml new file mode 100644 index 00000000..749d6826 --- /dev/null +++ b/deploy/nat-gateway/cloud-config.yaml @@ -0,0 +1,24 @@ +#cloud-config +write_files: + - content: | + table ip nat { + chain prerouting { + type nat hook prerouting priority dstnat; policy accept; + } + chain postrouting { + type nat hook postrouting priority srcnat; policy accept; + oifname "eth0" masquerade + } + } + owner: root:root + path: /etc/nftables/nat.conf + permissions: '0644' + +package_update: true +package_upgrade: true + +runcmd: + - echo 1 > /proc/sys/net/ipv4/ip_forward + - sed -i -e 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/' /etc/sysctl.conf + - echo 'include "/etc/nftables/*"' >> /etc/nftables.conf + - nft -f /etc/nftables.conf diff --git a/scaleup/.env.example b/scaleup/.env.example deleted file mode 100644 index 5365401f..00000000 --- a/scaleup/.env.example +++ /dev/null @@ -1,13 +0,0 @@ -# HETZNER LOGIN DATA -HETZNER_CLOUD_TOKEN= - -# CLOUDFLARE LOGIN DATA -CLOUDFLARE_ZONEID= -CLOUDFLARE_EMAIL= -CLOUDFLARE_TOKEN= - -# HETZNER CLOUD PROJECT ENVIRONMENT -IMAGE_ID= -NETWORK_ID= -PLACEMENTGROUP_ID= -SSH_KEY_NAMES= \ No newline at end of file diff --git a/scaleup/README.md b/scaleup/README.md deleted file mode 100644 index 04602ee9..00000000 --- a/scaleup/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# scaleup - -## prerequisites -Needs `hcloud` and `python-dotenv` installed via `pip`. -Copy the `.env.example` to `.env` and fill in all needed variables. - -## run -Run `portals_scaleup.py` with python3. - - - -# update hosts - -## prerequisites -Add all hosts to `hosts.ini`. -Nodes added by the scaleup process are added automaticly. 
-
-## run
-Run the ansible playbook with `ansible-playbook --user root -i ./hosts.ini ./playbooks/update_hosts.yml --extra-vars "branch=main"`
\ No newline at end of file
diff --git a/scaleup/hosts.ini b/scaleup/hosts.ini
deleted file mode 100644
index a7acbec8..00000000
--- a/scaleup/hosts.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-[webhosts]
-portals-1.fsr5.de
diff --git a/scaleup/playbooks/update_hosts.yml b/scaleup/playbooks/update_hosts.yml
deleted file mode 100644
index 506cae57..00000000
--- a/scaleup/playbooks/update_hosts.yml
+++ /dev/null
@@ -1,45 +0,0 @@
-- name: update hosts
-  hosts: "*"
-  tasks:
-    - name: maintenance laravel on
-      shell: php artisan down
-      args:
-        chdir: /var/www
-    - name: reset git
-      shell: git reset --hard
-      args:
-        chdir: /var/www
-    - name: checkout git
-      shell: "git checkout {{ branch }}"
-      args:
-        chdir: /var/www
-    - name: pull git
-      shell: "git pull origin {{ branch }}"
-      args:
-        chdir: /var/www
-    - name: install dependencies
-      shell: npm install
-      args:
-        chdir: /var/www
-    - name: build frontend
-      shell: npm run build
-      args:
-        chdir: /var/www
-- name: setup db
-  hosts: "portals-1.fsr5.de"
-  tasks:
-    - name: migrate db
-      shell: php artisan migrate --force
-      args:
-        chdir: /var/www
-    - name: seed db
-      shell: php artisan db:seed --force
-      args:
-        chdir: /var/www
-- name: activate application
-  hosts: "*"
-  tasks:
-    - name: maintenance laravel off
-      shell: php artisan up
-      args:
-        chdir: /var/www
\ No newline at end of file
diff --git a/scaleup/portals_scaleup.py b/scaleup/portals_scaleup.py
deleted file mode 100644
index a80d7979..00000000
--- a/scaleup/portals_scaleup.py
+++ /dev/null
@@ -1,74 +0,0 @@
-from operator import mod
-import os
-import requests
-from dotenv import load_dotenv
-from hcloud import Client
-
-load_dotenv()
-client = Client(token=os.getenv("HETZNER_CLOUD_TOKEN"))
-
-def find_next_server_id():
-    print("searching for next server id...")
-    highest_number = -1
-    for server in client.servers.get_all():
-        curr_numb = server.name.replace("portals-", "")
-        if curr_numb.isnumeric():
-            curr_numb = int(curr_numb)
-            print("found server with id " + str(curr_numb))
-            if curr_numb > highest_number:
-                highest_number = curr_numb
-    print("highest server id is " + str(highest_number))
-    return highest_number + 1
-
-def setup_server(server_id):
-    print("creating server from snapshot...")
-
-    keys = []
-    for keyname in os.getenv("SSH_KEY_NAMES").split(";"):
-        keys.append(client.ssh_keys.get_by_name(keyname))
-
-    dc = ""
-    if (server_id % 2) == 0:
-        dc = client.datacenters.get_by_id(4)
-    else:
-        dc = client.datacenters.get_by_id(2)
-
-    response = client.servers.create(name="portals-" + str(server_id),
-                                     server_type=client.server_types.get_by_name("cpx31"),
-                                     image=client.images.get_by_id(os.getenv("IMAGE_ID")),
-                                     ssh_keys=keys,
-                                     volumes=[],
-                                     firewalls=[],
-                                     networks=[client.networks.get_by_id(os.getenv("NETWORK_ID"))],
-                                     user_data="",
-                                     labels={"portals-role": "webhost"},
-                                     datacenter=dc,
-                                     start_after_create=True,
-                                     automount=None,
-                                     placement_group=client.placement_groups.get_by_id(os.getenv("PLACEMENTGROUP_ID")))
-
-    print("Created server " + response.server.name)
-
-    return {"ipv4": response.server.public_net.ipv4.ip, "ipv6": response.server.public_net.ipv6.ip.replace("::/64", "::1")}
-
-def setup_cloudflare(id, ips):
-    print("creating dns records...")
-    requests.post("https://api.cloudflare.com/client/v4/zones/" + os.getenv("CLOUDFLARE_ZONEID") + "/dns_records", headers={"X-Auth-Email": os.getenv("CLOUDFLARE_EMAIL"), "X-Auth-Key": os.getenv("CLOUDFLARE_TOKEN")}, json={"type":"A","name":"portals-" + str(id),"content": ips.get("ipv4"), "ttl":1,"proxied":False})
os.getenv("CLOUDFLARE_TOKEN")}, json={"type":"A","name":"portals-" + str(id),"content": ips.get("ipv4"), "ttl":1,"proxied":False}) - requests.post("https://api.cloudflare.com/client/v4/zones/" + os.getenv("CLOUDFLARE_ZONEID") + "/dns_records", headers={"X-Auth-Email": os.getenv("CLOUDFLARE_EMAIL"), "X-Auth-Key": os.getenv("CLOUDFLARE_TOKEN")}, json={"type":"AAAA","name":"portals-" + str(id),"content": ips.get("ipv6"), "ttl":1,"proxied":False}) - print("dns records finished") - -def add_to_local_ansible(id): - os.system("echo portals-" + str(id) + ".fsr5.de >> $(pwd)/hosts.ini") - print("added server to local ansible hosts file") - -def main(): - print("starting scaleup...") - next_id = find_next_server_id() - ips = setup_server(next_id) - setup_cloudflare(next_id, ips) - add_to_local_ansible(next_id) - print("scaleup complete!") - - -if __name__ == "__main__": - main() \ No newline at end of file