Merge develop into main #947

Workflow file for this run

name: CI
on: [push, pull_request, workflow_dispatch]
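# Azure Container Registry credentials; the deploy jobs below use these to push and pull the deploy image.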
env:
REGISTRY_LOGIN_SERVER: ${{ secrets.ACR_LOGIN_SERVER }}
REGISTRY_USERNAME: ${{ secrets.ACR_USERNAME }}
REGISTRY_PASSWORD: ${{ secrets.ACR_PASSWORD }}
jobs:
build:
runs-on: ubuntu-latest
env:
DOCKER_BUILDKIT: '1'
steps:
- uses: actions/checkout@v2
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_TOKEN }}
- name: Pull static images
run: docker compose -f docker-compose.dev.yml pull
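# Layer-cache trick: a throwaway local registry (registry:2) is backed by /tmp/docker-registry,
# which actions/cache persists between runs. The build below reads from it via --cache-from and
# BUILDKIT_INLINE_CACHE so unchanged layers are not rebuilt.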
- id: cache-docker
uses: actions/cache@v2
with:
path: /tmp/docker-registry
key: docker-registry-buildkit-${{ hashFiles('Dockerfile') }}
- run: docker run -d -p 5000:5000 --restart=always --name registry -v /tmp/docker-registry:/var/lib/registry registry:2 && npx wait-on tcp:5000
- run: docker build . -t iati-standard-website_web:latest --cache-from=localhost:5000/iati-standard-website_web --build-arg BUILDKIT_INLINE_CACHE=1
- run: docker tag iati-standard-website_web:latest localhost:5000/iati-standard-website_web && docker push localhost:5000/iati-standard-website_web || true
if: steps.cache-docker.outputs.cache-hit != 'true'
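# The local registry has done its job as a build cache; stop it to free port 5000 for the web
# container, bring the compose stack up, and wait until something is listening on 5000 again.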
- name: Setup docker compose
run: |
docker container stop registry
docker compose -f docker-compose.dev.yml up -d
while ! echo exit | nc localhost 5000; do sleep 10; done
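# Run the linters and the test suite inside the running web container.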
- name: flake8 lint
run: docker compose -f docker-compose.dev.yml exec -T web make flake8
# - name: pylint
# run: docker compose -f docker-compose.dev.yml exec -T web make pylint
- name: pydocstyle lint
run: docker compose -f docker-compose.dev.yml exec -T web make pydocstyle
- name: pytest
run: docker compose -f docker-compose.dev.yml exec -T web make test
deploy_dev:
needs: [build]
runs-on: ubuntu-latest
# Comparing against 'refs/heads/{0}' means this job runs on push and workflow_dispatch events only,
# never on pull_request events (their ref is `refs/pull/X/merge`).
# That is deliberate: a push to a PR whose branch matches vars.DEV_BRANCH_NAME must not
# start two deploys at once - they would conflict and crash.
if: ${{ github.ref == format('refs/heads/{0}', vars.DEV_BRANCH_NAME) }}
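# Images are tagged with the commit SHA; STAGE and NAME are combined with a colour to form the
# resource group and VM names used throughout this job.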
env:
DOCKER_BUILDKIT: '1'
TAG: ${{ github.sha }}
STAGE: dev
NAME: iati-website
steps:
- uses: actions/checkout@v2
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_TOKEN }}
- id: cache-docker
uses: actions/cache@v2
with:
path: /tmp/docker-registry
key: docker-registry-buildkit-${{ hashFiles('Dockerfile') }}
- run: docker run -d -p 5000:5000 --restart=always --name registry -v /tmp/docker-registry:/var/lib/registry registry:2 && npx wait-on tcp:5000
- run: docker build . -t iati-standard-website_web:latest --cache-from=localhost:5000/iati-standard-website_web --build-arg BUILDKIT_INLINE_CACHE=1
- name: 'Login via Azure CLI'
uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: "Login to azure"
uses: azure/docker-login@v1
with:
login-server: ${{ env.REGISTRY_LOGIN_SERVER }}
username: ${{ env.REGISTRY_USERNAME }}
password: ${{ env.REGISTRY_PASSWORD }}
- name: "Build and push image"
run: |
docker build -f Dockerfile_deploy . -t ${{ env.REGISTRY_LOGIN_SERVER }}/${{env.STAGE}}-${{env.NAME}}:${{ env.TAG }}
docker push ${{ env.REGISTRY_LOGIN_SERVER }}/${{env.STAGE}}-${{env.NAME}}:${{ env.TAG }}
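# Blue/green deployment: work out which colour is currently live and deploy the other one.
# If both resource groups exist, a previous deploy did not clean up after itself, so fail and
# ask for one of them to be deleted manually.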
- name: "Check if blue is running"
uses: "azure/[email protected]"
with:
azcliversion: 2.30.0
inlineScript: |
if echo $(az group exists --name "${{ env.STAGE }}-${{ env.NAME }}-blue") | grep -q "true"; then
if echo $(az group exists --name "${{ env.STAGE }}-${{ env.NAME }}-green") | grep -q "true"; then
echo "Blue and green resource groups exist, exiting. Please manually delete one, and re-run."
exit 1
fi
echo "NEW_COLOUR=green" >> $GITHUB_ENV
echo "OLD_COLOUR=blue" >> $GITHUB_ENV
else
echo "NEW_COLOUR=blue" >> $GITHUB_ENV
echo "OLD_COLOUR=green" >> $GITHUB_ENV
fi
- name: "Create new resource group"
uses: "azure/[email protected]"
with:
azcliversion: 2.30.0
inlineScript: az group create --location uksouth --name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}"
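# One long inline script: create the VM, allow its public IP through the Postgres firewall,
# open NSG ports 5000 (app) and 9157 (node_exporter), install node_exporter behind basic auth,
# install Docker via snap, pull and run the website image, add an hourly cron for scheduled
# pages and statistics, and tag the VM for monitoring.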
- name: "Deploy new Azure VM"
uses: "azure/[email protected]"
with:
azcliversion: 2.30.0
inlineScript: |
az vm create \
--resource-group "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--size Standard_B2s --public-ip-sku Standard \
--image "Canonical:0001-com-ubuntu-server-jammy:22_04-lts-gen2:latest" \
--ssh-key-values ${{ secrets.DEV_PUB_KEYS }} && \
export NEW_IP=$(az vm list-ip-addresses --name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" --resource-group "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" --query [].virtualMachine.network[].publicIpAddresses[][].ipAddress --output tsv) && \
echo "NEW_IP=$NEW_IP" >> $GITHUB_ENV && \
az postgres flexible-server firewall-rule update \
--resource-group "${{ secrets.DEV_PSQL_RESOURCE_GROUP }}" \
--name "${{ secrets.DEV_PSQL_NAME }}" \
--rule-name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--start-ip-address $NEW_IP \
--end-ip-address $NEW_IP && \
az network nsg rule create \
--resource-group "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--nsg-name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}NSG" \
--name AllowPort5000 \
--priority 1010 \
--access Allow \
--protocol Tcp \
--destination-port-ranges 5000 && \
az network nsg rule create \
--resource-group "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--nsg-name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}NSG" \
--name AllowPrometheusPort9157 \
--priority 1011 \
--access Allow \
--protocol Tcp \
--destination-port-ranges 9157 && \
az vm run-command invoke \
--resource-group "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--command-id RunShellScript \
--scripts "\
set -eux
adduser prometheus-client --disabled-password --gecos ''
cd /home/prometheus-client/
PROMETHEUS_VERSION=1.7.0
wget https://github.com/prometheus/node_exporter/releases/download/v\$PROMETHEUS_VERSION/node_exporter-\$PROMETHEUS_VERSION.linux-amd64.tar.gz
tar -xvzf node_exporter-\$PROMETHEUS_VERSION.linux-amd64.tar.gz
echo \"\
[Unit]
Description=Prometheus Node Exporter
Wants=network-online.target
After=network-online.target
[Service]
User=prometheus-client
Group=prometheus-client
Type=simple
ExecStart=/home/prometheus-client/node_exporter-\$PROMETHEUS_VERSION.linux-amd64/node_exporter \\\\
--collector.systemd \\\\
--web.listen-address=:9157 \\\\
--web.config.file /home/prometheus-client/web-config.yaml
[Install]
WantedBy=multi-user.target
\" > /etc/systemd/system/prometheus-node-exporter.service
echo 'basic_auth_users:
# Do not include the dollars in the secret, as escaping is a pain
# Password is generated using htpasswd -nBC 10 "" | tr -d ':'
prom: \"\$2y\$10\$${{ secrets.PROMETHEUS_CLIENT_PASSWORD_HASHED_PARTIAL }}\"
' > /home/prometheus-client/web-config.yaml
systemctl enable --now prometheus-node-exporter.service
" && \
az vm run-command invoke \
--resource-group "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--command-id RunShellScript \
--scripts "sudo snap install core snapd && sudo snap install docker" && \
az vm run-command invoke \
--resource-group "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--command-id RunShellScript \
--scripts "\
docker login -u '${{ env.REGISTRY_USERNAME }}' -p '${{ env.REGISTRY_PASSWORD }}' ${{ env.REGISTRY_LOGIN_SERVER }} && \
docker pull '${{ env.REGISTRY_LOGIN_SERVER }}/${{env.STAGE}}-${{env.NAME}}:${{ env.TAG }}' && \
docker run --restart always --name website -d -p 5000:5000 \
--log-driver 'json-file' \
--log-opt max-size=100m \
--log-opt max-file=3 \
-e DJANGO_SETTINGS_MODULE='iati.settings.dev_public' \
-e SECRET_KEY='${{ secrets.DEV_SECRET_KEY }}' \
-e DATABASE_NAME='${{ secrets.DEV_DATABASE_NAME }}' \
-e DATABASE_USER='${{ secrets.DEV_DATABASE_USER }}' \
-e DATABASE_PASS='${{ secrets.DEV_DATABASE_PASS }}' \
-e DATABASE_HOST='${{ secrets.DEV_DATABASE_HOST }}' \
-e DATABASE_PORT='${{ secrets.DEV_DATABASE_PORT }}' \
-e AZURE_ACCOUNT_NAME='${{ secrets.DEV_AZURE_ACCOUNT_NAME }}' \
-e AZURE_ACCOUNT_KEY='${{ secrets.DEV_AZURE_ACCOUNT_KEY }}' \
-e AZURE_CONTAINER='${{ secrets.DEV_AZURE_CONTAINER }}' \
-e ZENDESK_CAPTCHA_FIELD_ID='${{ secrets.ZENDESK_CAPTCHA_FIELD_ID }}' \
-e ZENDESK_SUSPICIOUS_FIELD_ID='${{ secrets.ZENDESK_SUSPICIOUS_FIELD_ID }}' \
-e RECAPTCHA_PUBLIC_KEY='${{ secrets.RECAPTCHA_PUBLIC_KEY }}' \
-e RECAPTCHA_PRIVATE_KEY='${{ secrets.RECAPTCHA_PRIVATE_KEY }}' \
-e GITHUB_TOKEN='${{ secrets.SSOT_GITHUB_TOKEN }}' \
'${{ env.REGISTRY_LOGIN_SERVER }}/${{env.STAGE}}-${{env.NAME}}:${{ env.TAG }}'" && \
az vm run-command invoke \
--resource-group "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--command-id RunShellScript \
--scripts "\
echo '#!/bin/sh\n/snap/bin/docker exec -i website sh -c '\''python3 manage.py publish_scheduled_pages && python3 manage.py updatestatistics'\''' > /etc/cron.hourly/hourly-cron && \
chmod +x /etc/cron.hourly/hourly-cron" && \
az resource tag \
--tags env="$STAGE" avgCPU=true ACU=true ACUvalue=300 \
--resource-group "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--resource-type "Microsoft.Compute/virtualMachines"
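# Give the container time to start, then look for the "Booting worker with pid" startup line
# in the container logs to decide whether the new VM is healthy.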
- name: "Wait for 5 minutes"
run: sleep 300
- name: "Check new Azure Container Instance success"
id: new_has_succeeded
continue-on-error: true
uses: "azure/[email protected]"
with:
azcliversion: 2.30.0
inlineScript: |
if echo $(az vm run-command invoke -g "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" -n "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" --command-id RunShellScript --scripts "docker logs website" -o tsv --query value[0].message) | grep -q "Booting worker with pid"; then
echo "Success"
else
echo "Failure"
exit 1
fi
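# If the new VM is healthy, point the NGINX reverse proxy at its IP and reload it, then delete
# the old colour's resource group; otherwise delete the resource group that was just created.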
- if: steps.new_has_succeeded.outcome == 'success'
name: "Update NGINX VM backend"
env:
SSH_AUTH_SOCK: /tmp/ssh_agent.sock
run: |
ssh-agent -a $SSH_AUTH_SOCK > /dev/null && \
ssh-add - <<< "${{ secrets.DEV_NGINX_KEY }}" && \
sed -i 's/XX.XX.XX.XX/'${{ env.NEW_IP }}'/g' config/nginx/website_dev.conf && \
scp -o StrictHostKeyChecking=no config/nginx/website_dev.conf azureuser@${{ secrets.DEV_NGINX_IP }}:/etc/nginx/conf.d/website.conf && \
ssh -o StrictHostKeyChecking=no azureuser@${{ secrets.DEV_NGINX_IP }} /home/azureuser/restart_nginx.sh
- if: steps.new_has_succeeded.outcome == 'success'
name: "Delete previous VM resource group"
uses: "azure/[email protected]"
with:
azcliversion: 2.30.0
inlineScript: |
az group delete --yes --no-wait \
--name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.OLD_COLOUR }}"
- if: steps.new_has_succeeded.outcome != 'success'
name: "Delete failed VM resource group"
uses: "azure/[email protected]"
with:
azcliversion: 2.30.0
inlineScript: |
az group delete --yes --no-wait \
--name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}"
deploy_prod:
needs: [build]
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/main'
env:
DOCKER_BUILDKIT: '1'
TAG: ${{ github.sha }}
STAGE: prod
NAME: iati-website
steps:
- uses: actions/checkout@v2
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_TOKEN }}
- id: cache-docker
uses: actions/cache@v2
with:
path: /tmp/docker-registry
key: docker-registry-buildkit-${{ hashFiles('Dockerfile') }}
- run: docker run -d -p 5000:5000 --restart=always --name registry -v /tmp/docker-registry:/var/lib/registry registry:2 && npx wait-on tcp:5000
- run: docker build . -t iati-standard-website_web:latest --cache-from=localhost:5000/iati-standard-website_web --build-arg BUILDKIT_INLINE_CACHE=1
- name: 'Login via Azure CLI'
uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: "Login to azure"
uses: azure/docker-login@v1
with:
login-server: ${{ env.REGISTRY_LOGIN_SERVER }}
username: ${{ env.REGISTRY_USERNAME }}
password: ${{ env.REGISTRY_PASSWORD }}
- name: "Build and push image"
run: |
docker build -f Dockerfile_deploy . -t ${{ env.REGISTRY_LOGIN_SERVER }}/${{env.STAGE}}-${{env.NAME}}:${{ env.TAG }}
docker push ${{ env.REGISTRY_LOGIN_SERVER }}/${{env.STAGE}}-${{env.NAME}}:${{ env.TAG }}
- name: "Check if blue is running"
uses: "azure/[email protected]"
with:
azcliversion: 2.30.0
inlineScript: |
if echo $(az group exists --name "${{ env.STAGE }}-${{ env.NAME }}-blue") | grep -q "true"; then
if echo $(az group exists --name "${{ env.STAGE }}-${{ env.NAME }}-green") | grep -q "true"; then
echo "Blue and green resource groups exist, exiting. Please manually delete one, and re-run."
exit 1
fi
echo "NEW_COLOUR=green" >> $GITHUB_ENV
echo "OLD_COLOUR=blue" >> $GITHUB_ENV
else
echo "NEW_COLOUR=blue" >> $GITHUB_ENV
echo "OLD_COLOUR=green" >> $GITHUB_ENV
fi
- name: "Create new resource group"
uses: "azure/[email protected]"
with:
azcliversion: 2.30.0
inlineScript: az group create --location uksouth --name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}"
- name: "Deploy new Azure VM"
uses: "azure/[email protected]"
with:
azcliversion: 2.30.0
inlineScript: |
az vm create \
--resource-group "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--size Standard_B2s --public-ip-sku Standard \
--image "Canonical:0001-com-ubuntu-server-jammy:22_04-lts-gen2:latest" \
--ssh-key-values ${{ secrets.DEV_PUB_KEYS }} && \
export NEW_IP=$(az vm list-ip-addresses --name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" --resource-group "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" --query [].virtualMachine.network[].publicIpAddresses[][].ipAddress --output tsv) && \
echo "NEW_IP=$NEW_IP" >> $GITHUB_ENV && \
az postgres flexible-server firewall-rule update \
--resource-group "${{ secrets.PROD_PSQL_RESOURCE_GROUP }}" \
--name "${{ secrets.PROD_PSQL_NAME }}" \
--rule-name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--start-ip-address $NEW_IP \
--end-ip-address $NEW_IP && \
az network nsg rule create \
--resource-group "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--nsg-name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}NSG" \
--name AllowPort5000 \
--priority 1010 \
--access Allow \
--protocol Tcp \
--destination-port-ranges 5000 && \
az network nsg rule create \
--resource-group "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--nsg-name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}NSG" \
--name AllowPrometheusPort9157 \
--priority 1011 \
--access Allow \
--protocol Tcp \
--destination-port-ranges 9157 && \
az vm run-command invoke \
--resource-group "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--command-id RunShellScript \
--scripts "\
set -eux
adduser prometheus-client --disabled-password --gecos ''
cd /home/prometheus-client/
PROMETHEUS_VERSION=1.7.0
wget https://github.com/prometheus/node_exporter/releases/download/v\$PROMETHEUS_VERSION/node_exporter-\$PROMETHEUS_VERSION.linux-amd64.tar.gz
tar -xvzf node_exporter-\$PROMETHEUS_VERSION.linux-amd64.tar.gz
echo \"\
[Unit]
Description=Prometheus Node Exporter
Wants=network-online.target
After=network-online.target
[Service]
User=prometheus-client
Group=prometheus-client
Type=simple
ExecStart=/home/prometheus-client/node_exporter-\$PROMETHEUS_VERSION.linux-amd64/node_exporter \\\\
--collector.systemd \\\\
--web.listen-address=:9157 \\\\
--web.config.file /home/prometheus-client/web-config.yaml
[Install]
WantedBy=multi-user.target
\" > /etc/systemd/system/prometheus-node-exporter.service
echo 'basic_auth_users:
# Do not include the dollars in the secret, as escaping is a pain
# Password is generated using htpasswd -nBC 10 "" | tr -d ':'
prom: \"\$2y\$10\$${{ secrets.PROMETHEUS_CLIENT_PASSWORD_HASHED_PARTIAL }}\"
' > /home/prometheus-client/web-config.yaml
systemctl enable --now prometheus-node-exporter.service
" && \
az vm run-command invoke \
--resource-group "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--command-id RunShellScript \
--scripts "sudo snap install core snapd && sudo snap install docker" && \
az vm run-command invoke \
--resource-group "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--command-id RunShellScript \
--scripts "\
docker login -u '${{ env.REGISTRY_USERNAME }}' -p '${{ env.REGISTRY_PASSWORD }}' ${{ env.REGISTRY_LOGIN_SERVER }} && \
docker pull '${{ env.REGISTRY_LOGIN_SERVER }}/${{env.STAGE}}-${{env.NAME}}:${{ env.TAG }}' && \
docker run --restart always --name website -d -p 5000:5000 \
--log-driver 'json-file' \
--log-opt max-size=100m \
--log-opt max-file=3 \
-e DJANGO_SETTINGS_MODULE='iati.settings.production' \
-e SECRET_KEY='${{ secrets.PROD_SECRET_KEY }}' \
-e DATABASE_NAME='${{ secrets.PROD_DATABASE_NAME }}' \
-e DATABASE_USER='${{ secrets.PROD_DATABASE_USER }}' \
-e DATABASE_PASS='${{ secrets.PROD_DATABASE_PASS }}' \
-e DATABASE_HOST='${{ secrets.PROD_DATABASE_HOST }}' \
-e DATABASE_PORT='${{ secrets.PROD_DATABASE_PORT }}' \
-e APPLICATIONINSIGHTS_CONNECTION_STRING='${{ secrets.PROD_APPLICATIONINSIGHTS_CONNECTION_STRING }}' \
-e AZURE_ACCOUNT_NAME='${{ secrets.PROD_AZURE_ACCOUNT_NAME }}' \
-e AZURE_ACCOUNT_KEY='${{ secrets.PROD_AZURE_ACCOUNT_KEY }}' \
-e AZURE_CONTAINER='${{ secrets.PROD_AZURE_CONTAINER }}' \
-e ZENDESK_CAPTCHA_FIELD_ID='${{ secrets.ZENDESK_CAPTCHA_FIELD_ID }}' \
-e ZENDESK_SUSPICIOUS_FIELD_ID='${{ secrets.ZENDESK_SUSPICIOUS_FIELD_ID }}' \
-e RECAPTCHA_PUBLIC_KEY='${{ secrets.RECAPTCHA_PUBLIC_KEY }}' \
-e RECAPTCHA_PRIVATE_KEY='${{ secrets.RECAPTCHA_PRIVATE_KEY }}' \
-e GITHUB_TOKEN='${{ secrets.SSOT_GITHUB_TOKEN }}' \
'${{ env.REGISTRY_LOGIN_SERVER }}/${{env.STAGE}}-${{env.NAME}}:${{ env.TAG }}'" && \
az vm run-command invoke \
--resource-group "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--command-id RunShellScript \
--scripts "\
echo '#!/bin/sh\n/snap/bin/docker exec -i website sh -c '\''python3 manage.py publish_scheduled_pages && python3 manage.py updatestatistics'\''' > /etc/cron.hourly/hourly-cron && \
chmod +x /etc/cron.hourly/hourly-cron" && \
az resource tag \
--tags env="$STAGE" avgCPU=true ACU=true ACUvalue=300 \
--resource-group "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" \
--resource-type "Microsoft.Compute/virtualMachines"
- name: "Wait for 5 minutes"
run: sleep 300
- name: "Check new Azure Container Instance success"
id: new_has_succeeded
continue-on-error: true
uses: "azure/[email protected]"
with:
azcliversion: 2.30.0
inlineScript: |
if echo $(az vm run-command invoke -g "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" -n "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}" --command-id RunShellScript --scripts "docker logs website" -o tsv --query value[0].message) | grep -q "Booting worker with pid"; then
echo "Success"
else
echo "Failure"
exit 1
fi
- if: steps.new_has_succeeded.outcome == 'success'
name: "Update NGINX VM backend"
env:
SSH_AUTH_SOCK: /tmp/ssh_agent.sock
run: |
ssh-agent -a $SSH_AUTH_SOCK > /dev/null && \
ssh-add - <<< "${{ secrets.PROD_NGINX_KEY }}" && \
sed -i 's/XX.XX.XX.XX/'${{ env.NEW_IP }}'/g' config/nginx/website_prod.conf && \
scp -o StrictHostKeyChecking=no config/nginx/website_prod.conf azureuser@${{ secrets.PROD_NGINX_IP }}:/etc/nginx/conf.d/website.conf && \
ssh -o StrictHostKeyChecking=no azureuser@${{ secrets.PROD_NGINX_IP }} /home/azureuser/restart_nginx.sh
- if: steps.new_has_succeeded.outcome == 'success'
name: "Delete previous VM resource group"
uses: "azure/[email protected]"
with:
azcliversion: 2.30.0
inlineScript: |
az group delete --yes --no-wait \
--name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.OLD_COLOUR }}"
- if: steps.new_has_succeeded.outcome != 'success'
name: "Delete failed VM resource group"
uses: "azure/[email protected]"
with:
azcliversion: 2.30.0
inlineScript: |
az group delete --yes --no-wait \
--name "${{ env.STAGE }}-${{ env.NAME }}-${{ env.NEW_COLOUR }}"