We often hit this situation in PoCs: the customer's host has only a single 1T SSD. A default ocp install takes over this first disk and creates 4 partitions, with the root partition occupying nearly the whole drive. In practice, ocp only uses around 100G at runtime, so most of that 1T SSD is wasted. We would like to carve roughly 800G out of this disk for a storage solution such as ceph/odf. So how do we customize the installer so that it creates a 5th partition for us during installation?
This article describes how, on single node openshift with only one disk, to create a few extra partitions on that disk to use as data partitions.
This is useful in certain resource-constrained PoC scenarios, for example running ceph on a single-disk single node.
As background knowledge, or as a prerequisite lab, you should first know how to deploy a plain single node openshift.
The internal installation logic is illustrated in the diagram below:

Video walkthrough
# download yq and install
mkdir tmp; cd tmp
wget https://github.com/mikefarah/yq/releases/download/v4.25.1/yq_linux_amd64.tar.gz
tar -zxvf yq_linux_amd64.tar.gz
install yq_linux_amd64 /usr/local/bin/yq
# calculate a password hash
# VAR_PWD=`podman run -ti --rm quay.io/coreos/mkpasswd --method=yescrypt redhat`
# $y$j9T$UCg7ef5in/0aw0C2ZqSFo.$n8gC9.kDzWwlq0GmXKDVH8KUuGNdj7l6tnAsR4RZaG5
VAR_PWD_HASH="$(python3 -c 'import crypt,getpass; print(crypt.crypt("redhat"))')"
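# optional sanity check: re-hash the password using the generated hash as the
# salt; it should print True if VAR_PWD_HASH really encodes "redhat"
python3 -c "import crypt; h='$VAR_PWD_HASH'; print(crypt.crypt('redhat', h) == h)"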
# # https://docs.fedoraproject.org/en-US/fedora-coreos/storage/#_setting_up_separate_var_mounts
# cat << EOF > /data/sno/root-partition.bu
# variant: openshift
# version: 4.8.0
# metadata:
#   name: root-storage
#   labels:
#     machineconfiguration.openshift.io/role: master
# storage:
#   disks:
#     - device: /dev/vda
#       wipe_table: false
#       partitions:
#         - number: 4
#           label: root
#           size_mib: $(( 120 * 1024 ))
#           resize: true
#         - label: data_odf_lvm
#           size_mib: 0
# EOF
# butane /data/sno/root-partition.bu -r -o /data/install/partition-ric.ign
# # merge the 2 ignition files
# jq -s '.[0] * .[1]' /data/install/iso.ign /data/install/partition-ric.ign | jq -c . > /data/install/iso.ign.new
# /bin/cp -f /data/install/iso.ign.new /data/install/iso.ign
# https://github.com/openshift/installer/blob/master/data/data/bootstrap/bootstrap-in-place/files/opt/openshift/bootstrap-in-place/master-update.fcc
# cat iso.ign | jq ' .storage.files[] | select ( .path == "/opt/openshift/bootstrap-in-place/master-update.fcc" ) ' | jq -r .contents.source | sed 's/.*base64,//g' | base64 -d > /data/install/master-update.fcc
cat << EOF > /data/install/root-partition.fc
variant: fcos
version: 1.3.0
# !!! do not include passwd / users in production system !!!
passwd:
  users:
    - name: wzh
      password_hash: $VAR_PWD_HASH
      system: true
      ssh_authorized_keys:
        - $NODE_SSH_KEY
      groups:
        - adm
        - wheel
        - sudo
        - systemd-journal
storage:
  disks:
    - device: /dev/vda
      wipe_table: false
      partitions:
        - number: 4
          label: root
          size_mib: $(( 120 * 1024 ))
          resize: true
        # - label: data_01
        #   size_mib: $(( 5 * 1024 ))
        - label: data_odf_lvm
          size_mib: 0
EOF
butane /data/install/root-partition.fc -r -o /data/install/partition-ric.ign
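# optional check: the generated ignition should now describe the resized root
# partition plus the extra data partition on /dev/vda
jq '.storage.disks[0].partitions' /data/install/partition-ric.ign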
cat << EOF > /data/sno/root-partition.bu
variant: openshift
version: 4.9.0
metadata:
  labels:
    machineconfiguration.openshift.io/role: master
  name: 99-zzz-master-static-hostname
storage:
  files:
    - path: /opt/openshift/partition-ric.ign
      mode: 0644
      overwrite: true
      contents:
        local: partition-ric.ign
EOF
# yq '. *= load("/data/install/master-update.fcc")' /data/install/root-partition.fc > /data/install/root-partition.fcc
# config_source=$(cat /data/install/root-partition.fcc | python3 -c "import sys, urllib.parse; print(urllib.parse.quote(''.join(sys.stdin.readlines())))" )
# VAR_FCC_FILE="data:text/plain,${config_source}"
butane -d /data/install /data/sno/root-partition.bu > /data/sno/disconnected/99-zzz-master-root-partition.yaml
get_file_content_for_ignition "/opt/openshift/partition-ric.ign" "/data/sno/disconnected/99-zzz-master-root-partition.yaml"
VAR_99_master_fcc=$RET_VAL
VAR_99_master_fcc_2=$RET_VAL_2
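# note: get_file_content_for_ignition comes from the base single node openshift
# install lab; a rough sketch of what it is assumed to do (pull the matching
# ignition file entry out of the MachineConfig that butane just rendered):
# get_file_content_for_ignition() {
#   local FILE_PATH=$1
#   local MC_YAML=$2
#   RET_VAL_2=$(yq -o=json ".spec.config.storage.files[] | select(.path == \"$FILE_PATH\")" $MC_YAML | jq -c .)
#   RET_VAL=$(echo $RET_VAL_2 | jq -r .contents.source)
# }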
cat iso.ign | jq ' .storage.files[] | select ( .path == "/usr/local/bin/bootstrap-in-place.sh" ) ' | jq -r .contents.source | sed 's/.*base64,//g' | base64 -d > /data/install/bootstrap-in-place.sh
# patch bootstrap-in-place.sh: insert a step that merges our partition ignition
# (partition-ric.ign) into master.ign before the script marks the master ignition done
cat << EOF > /data/install/bootstrap-in-place.sh.patch
jq -s '.[0] * .[1]' /opt/openshift/master.ign /opt/openshift/partition-ric.ign | jq -c . > /opt/openshift/master.ign.new
/bin/cp -f /opt/openshift/master.ign.new /opt/openshift/master.ign
EOF
# https://stackoverflow.com/questions/26141347/using-sed-to-insert-file-content-into-a-file-before-a-pattern
sed $'/touch master-ignition.done/{e cat \/data\/install\/bootstrap-in-place.sh.patch\n}' /data/install/bootstrap-in-place.sh > /data/install/bootstrap-in-place.sh.new
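# optional check: the jq merge snippet should now sit right before the
# "touch master-ignition.done" marker in the patched script
grep -B 3 'touch master-ignition.done' /data/install/bootstrap-in-place.sh.new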
cat << EOF > /data/sno/bootstrap-in-place.bu
variant: openshift
version: 4.9.0
metadata:
  labels:
    machineconfiguration.openshift.io/role: master
  name: 99-zzz-master-bootstrap-in-place
storage:
  files:
    - path: /usr/local/bin/bootstrap-in-place.sh
      mode: 0555
      overwrite: true
      contents:
        local: bootstrap-in-place.sh.new
EOF
butane -d /data/install /data/sno/bootstrap-in-place.bu > /data/sno/disconnected/99-zzz-master-bootstrap-in-place.yaml
get_file_content_for_ignition "/usr/local/bin/bootstrap-in-place.sh" "/data/sno/disconnected/99-zzz-master-bootstrap-in-place.yaml"
VAR_99_master_bootstrap_sh=$RET_VAL
VAR_99_master_bootstrap_sh_2=$RET_VAL_2
cat iso.ign | jq ' del ( .storage.files[] | select ( .path == "/usr/local/bin/bootstrap-in-place.sh" ) )' > /data/install/iso.ign.new
# cat iso.ign | jq ' .storage.files[] | select ( .path == "/usr/local/bin/bootstrap-in-place.sh" ) ' | jq -r .contents.source
cat /data/install/iso.ign.new \
| jq --argjson VAR "$VAR_99_master_fcc_2" '.storage.files += [$VAR] ' \
| jq --argjson VAR "$VAR_99_master_bootstrap_sh_2" '.storage.files += [$VAR] ' \
| jq -c . \
> /data/install/iso.ign
# cat iso.ign | jq ' .storage.files[] | select ( .path == "/opt/openshift/bootstrap-in-place/master-update.fcc" ) ' | jq -r .contents.source
# cat iso.ign | jq ' .storage.files[] | select ( .path == "/opt/openshift/partition-ric.ign" ) ' | jq -r .contents.source
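# optional check: the final iso.ign should now carry both injected files
jq -r '.storage.files[].path' /data/install/iso.ign | grep -E 'partition-ric.ign|bootstrap-in-place.sh'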
check the result
ssh -tt [email protected] -- lsblk
# NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
# sr0 11:0 1 1024M 0 rom
# vda 252:0 0 400G 0 disk
# ├─vda1 252:1 0 1M 0 part
# ├─vda2 252:2 0 127M 0 part
# ├─vda3 252:3 0 384M 0 part /boot
# ├─vda4 252:4 0 120G 0 part /sysroot
# └─vda5 252:5 0 279.5G 0 part
Now that we have the extra partitions, let's quickly test how to turn them into PVs.
cat << EOF > /data/install/local-storage.yaml
---
apiVersion: v1
kind: Namespace
metadata:
  name: openshift-local-storage
  annotations:
    workload.openshift.io/allowed: management
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: openshift-local-storage
  namespace: openshift-local-storage
spec:
  targetNamespaces:
    - openshift-local-storage
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: local-storage-operator
  namespace: openshift-local-storage
spec:
  channel: "stable"
  installPlanApproval: Manual
  name: local-storage-operator
  source: redhat-operators
  sourceNamespace: openshift-marketplace
EOF
oc create -f /data/install/local-storage.yaml
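# the Subscription above uses installPlanApproval: Manual, so the operator will
# not install until its install plan is approved; a minimal CLI sketch to approve
# it (the web console works just as well):
oc get installplan -n openshift-local-storage
oc patch installplan -n openshift-local-storage \
  $(oc get installplan -n openshift-local-storage -o jsonpath='{.items[0].metadata.name}') \
  --type merge -p '{"spec":{"approved":true}}'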
cat << EOF > /data/install/local-storage-lv.yaml
apiVersion: "local.storage.openshift.io/v1"
kind: "LocalVolume"
metadata:
  name: "local-disks"
  namespace: "openshift-local-storage"
spec:
  nodeSelector:
    nodeSelectorTerms:
      - matchExpressions:
          - key: kubernetes.io/hostname
            operator: In
            values:
              - acm-demo-hub-master
  storageClassDevices:
    - storageClassName: "local-sc"
      volumeMode: Filesystem
      fsType: xfs
      devicePaths:
        - /dev/vda5
EOF
oc create -f /data/install/local-storage-lv.yaml
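# optional check: once the LocalVolume is reconciled, a storage class named
# local-sc and a PV backed by /dev/vda5 should appear
oc get sc
oc get pv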
We create a pod, create and consume a PVC, write some data into it, then delete the pod and the PVC. After that we recreate the pod and the PVC and check whether the data inside has been wiped. A test sketch follows after the pod is created below.
cat << EOF >> /data/install/pvc-demo.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: local-pvc-demo
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 2Gi
  storageClassName: local-sc
---
kind: Pod
apiVersion: v1
metadata:
  annotations:
  name: demo1
spec:
  nodeSelector:
    kubernetes.io/hostname: 'acm-demo-hub-master'
  restartPolicy: Always
  containers:
    - name: demo1
      image: >-
        quay.io/wangzheng422/qimgs:centos7-test
      env:
        - name: key
          value: value
      command:
        - sleep
        - infinity
      imagePullPolicy: Always
      volumeMounts:
        - mountPath: /data
          name: demo
          readOnly: false
  volumes:
    - name: demo
      persistentVolumeClaim:
        claimName: local-pvc-demo
EOF
oc create -n default -f /data/install/pvc-demo.yaml
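# a sketch of the test described above (the file name wzh.test.txt is only an example):
oc wait -n default --for=condition=Ready pod/demo1
oc exec -n default demo1 -- touch /data/wzh.test.txt
oc exec -n default demo1 -- ls /data
# delete the pod together with the pvc, then recreate both
oc delete -n default -f /data/install/pvc-demo.yaml
oc create -n default -f /data/install/pvc-demo.yaml
oc wait -n default --for=condition=Ready pod/demo1
# see whether the marker file survived the pvc re-creation
oc exec -n default demo1 -- ls /data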
install lvm operator

The lvm operator does NOT work.

Some research notes follow, kept for reference, on how the installer's bootstrap-in-place step embeds master-update.fcc and regenerates master.ign.
cat iso.ign | jq .storage.files[].path | grep fcc
# "/opt/openshift/bootstrap-in-place/master-update.fcc"
cat iso.ign | jq ' .storage.files[] | select ( .path == "/opt/openshift/bootstrap-in-place/master-update.fcc" ) ' | jq -r .contents.source | sed 's/.*base64,//g' | base64 -d
# variant: fcos
# version: 1.1.0
# ignition:
#   config:
#     merge:
#       - local: original-master.ign
# storage:
#   trees:
#     - local: kubernetes/bootstrap-configs
#       path: /etc/kubernetes/bootstrap-configs
#     - local: tls/
#       path: /etc/kubernetes/bootstrap-secrets
#     - local: etcd-bootstrap/etc-kubernetes/static-pod-resources/etcd-member/
#       path: /etc/kubernetes/static-pod-resources/etcd-member
#     - local: etcd-data
#       path: /var/lib/etcd
#   files:
#     - path: /etc/kubernetes/bootstrap-secrets/kubeconfig
#       contents:
#         local: auth/kubeconfig-loopback
#     - path: /etc/kubernetes/manifests/etcd-pod.yaml
#       contents:
#         local: etcd-bootstrap/etc-kubernetes/manifests/etcd-member-pod.yaml
#     - path: /etc/kubernetes/manifests/kube-apiserver-pod.yaml
#       contents:
#         local: bootstrap-manifests/kube-apiserver-pod.yaml
#     - path: /etc/kubernetes/manifests/kube-controller-manager-pod.yaml
#       contents:
#         local: bootstrap-manifests/kube-controller-manager-pod.yaml
#     - path: /etc/kubernetes/manifests/kube-scheduler-pod.yaml
#       contents:
#         local: bootstrap-manifests/kube-scheduler-pod.yaml
#     - path: /usr/local/bin/bootstrap-in-place-post-reboot.sh
#       contents:
#         local: bootstrap-in-place/bootstrap-in-place-post-reboot.sh
#       mode: 0555
#     - path: /var/log/log-bundle-bootstrap.tar.gz
#       contents:
#         local: log-bundle-bootstrap.tar.gz
#     - path: /usr/local/bin/installer-masters-gather.sh
#       contents:
#         local: bin/installer-masters-gather.sh
#       mode: 0555
#     - path: /usr/local/bin/installer-gather.sh
#       contents:
#         local: bin/installer-gather.sh
#       mode: 0555
# systemd:
#   units:
#     - name: bootkube.service
#       enabled: true
#       contents: |
#         [Unit]
#         Description=Bootkube - bootstrap in place post reboot
#         Wants=kubelet.service
#         After=kubelet.service
#         ConditionPathExists=/etc/kubernetes/bootstrap-secrets/kubeconfig
#         [Service]
#         Type=oneshot
#         ExecStart=/usr/local/bin/bootstrap-in-place-post-reboot.sh
#         RestartSec=5s
#         [Install]
#         WantedBy=multi-user.target
cat iso.ign | jq ' .storage.files[] | select ( .path == "/usr/local/bin/bootstrap-in-place.sh" ) ' | jq -r .contents.source | sed 's/.*base64,//g' | base64 -d
# ......
# echo "Adding bootstrap control plane and bootstrap installer-gather bundle to master ignition"
# bootkube_podman_run \
# --rm \
# --privileged \
# --volume "$PWD:/assets:z" \
# --volume "/usr/local/bin/:/assets/bin" \
# --volume "/var/lib/etcd/:/assets/etcd-data" \
# --volume "/etc/kubernetes:/assets/kubernetes" \
# "${CLUSTER_BOOTSTRAP_IMAGE}" \
# bootstrap-in-place \
# --asset-dir /assets \
# --input /assets/bootstrap-in-place/master-update.fcc \
# --output /assets/master.ign
# touch master-ignition.done
# record_service_stage_success
# fi
# https://github.com/openshift/cluster-bootstrap
cd /data/install
podman run --rm -it \
--privileged \
--volume "$PWD:/assets:z" \
--volume "/usr/local/bin/:/assets/bin" \
--volume "/var/lib/etcd/:/assets/etcd-data" \
--volume "/etc/kubernetes:/assets/kubernetes" \
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c29cb321d7ac72d86a86ba4a74a0774ed2ebf9910d65c1805245a17d7b005b88 \
bootstrap-in-place \
--asset-dir /assets \
--input /assets/bootstrap-in-place/master-update.fcc \
--output /assets/master.ign
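# if the run succeeds, the regenerated master.ign can be inspected the same way,
# for example by listing the files it will lay down:
jq -r '.storage.files[].path' /data/install/master.ign | head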
lsblk -o PARTUUID,NAME,FSTYPE,LABEL,UUID,MOUNTPOINT
# PARTUUID NAME FSTYPE LABEL UUID MOUNTPOINT
# sr0
# vda
# e23d3123-1d83-4665-8b0f-1c39f8e8f533 ├─vda1
# ed26d305-052e-4148-9b44-05357053742a ├─vda2 vfat EFI-SYSTEM 1533-24B8
# ae634b25-a5b9-4667-85ce-119455a92e53 ├─vda3 ext4 boot 85555068-e37d-4773-837c-d279550eb818 /boot
# ef1b4117-0c2d-4f53-abd4-d3019ecf267e ├─vda4 xfs root 936512ae-5449-4a2f-808e-1c698859c877 /sysroot
# e7b459fb-f2e1-43c9-b638-c732898eedf5 ├─vda5
# 9f0f85c7-51c6-4f2a-b7b7-c8ea3131fb32 └─vda6