From b51f28a349f892ca6e4ac98aa780d8079ff951af Mon Sep 17 00:00:00 2001
From: Hyunseok Cho
Date: Tue, 26 Dec 2023 11:01:34 +0900
Subject: [PATCH] doc: Update Backend.AI production installation guide doc
 (#1796)

---
 changes/1796.doc.md                           |  1 +
 docs/dev/development-setup.rst                |  2 +-
 .../install-from-package/install-agent.rst    |  5 +-
 .../install-from-package/install-manager.rst  |  3 +
 .../install-storage-proxy.rst                 |  3 +
 .../install-webserver.rst                     | 65 ++++++++++++++++++-
 .../install-from-package/os-preparation.rst   | 52 ++++++++++++---
 .../prepare-cache-service.rst                 | 10 +--
 .../prepare-config-service.rst                | 10 +--
 .../install-from-package/prepare-database.rst | 12 ++--
 10 files changed, 133 insertions(+), 30 deletions(-)
 create mode 100644 changes/1796.doc.md

diff --git a/changes/1796.doc.md b/changes/1796.doc.md
new file mode 100644
index 0000000000..192f743f73
--- /dev/null
+++ b/changes/1796.doc.md
@@ -0,0 +1 @@
+Update Backend.AI production installation guide doc
diff --git a/docs/dev/development-setup.rst b/docs/dev/development-setup.rst
index 9f639539d4..f79a7804aa 100644
--- a/docs/dev/development-setup.rst
+++ b/docs/dev/development-setup.rst
@@ -239,7 +239,7 @@ Open yet another terminal for client and run:
 .. code-block:: console

    $ source ./env-local-admin-api.sh  # Use the generated local endpoint and credential config.
-   $ # source ./env-local-user-api.sh  # Yo may choose an alternative credential config.
+   $ # source ./env-local-user-api.sh  # You may choose an alternative credential config.
    $ ./backend.ai config
    $ ./backend.ai run python --rm -c 'print("hello world")'
    ∙ Session token prefix: fb05c73953
diff --git a/docs/install/install-from-package/install-agent.rst b/docs/install/install-from-package/install-agent.rst
index f5c7222a8f..f9c4e99866 100644
--- a/docs/install/install-from-package/install-agent.rst
+++ b/docs/install/install-from-package/install-agent.rst
@@ -87,7 +87,7 @@ would be:
    scratch-size = "1G"

    [watcher]
-   service-addr = { host = "bai-m1", port = 6009 }
+   service-addr = { host = "bai-a01", port = 6009 }
    ssl-enabled = false
    target-service = "backendai-agent.service"
    soft-reset-available = false
@@ -189,6 +189,9 @@ First, create a runner script at ``${HOME}/bin/run-agent.sh``:
       export HOME="/home/bai"
    fi

+   # -- If you have installed using static python --
+   source .venv/bin/activate
+
    # -- If you have installed using pyenv --
    if [ -z "$PYENV_ROOT" ]; then
       export PYENV_ROOT="$HOME/.pyenv"
diff --git a/docs/install/install-from-package/install-manager.rst b/docs/install/install-from-package/install-manager.rst
index d6c34211a9..014f8905b2 100644
--- a/docs/install/install-from-package/install-manager.rst
+++ b/docs/install/install-from-package/install-manager.rst
@@ -264,6 +264,9 @@ First, create a runner script at ``${HOME}/bin/run-manager.sh``:
       export HOME="/home/bai"
    fi

+   # -- If you have installed using static python --
+   source .venv/bin/activate
+
    # -- If you have installed using pyenv --
    if [ -z "$PYENV_ROOT" ]; then
       export PYENV_ROOT="$HOME/.pyenv"
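After creating a runner script such as ``${HOME}/bin/run-manager.sh`` above, it is worth
marking it executable and syntax-checking it before wiring it into a service unit. This is
a minimal sketch that assumes the script path used in the examples; adjust it to your layout:

.. code-block:: console

   $ chmod +x ${HOME}/bin/run-manager.sh
   $ bash -n ${HOME}/bin/run-manager.sh   # parse-only check; does not start the manager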
diff --git a/docs/install/install-from-package/install-storage-proxy.rst b/docs/install/install-from-package/install-storage-proxy.rst
index 3d4a86011f..eed22d421a 100644
--- a/docs/install/install-from-package/install-storage-proxy.rst
+++ b/docs/install/install-from-package/install-storage-proxy.rst
@@ -148,6 +148,9 @@ First, create a runner script at ``${HOME}/bin/run-storage-proxy.sh``:
       export HOME="/home/bai"
    fi

+   # -- If you have installed using static python --
+   source .venv/bin/activate
+
    # -- If you have installed using pyenv --
    if [ -z "$PYENV_ROOT" ]; then
       export PYENV_ROOT="$HOME/.pyenv"
diff --git a/docs/install/install-from-package/install-webserver.rst b/docs/install/install-from-package/install-webserver.rst
index 9ae6c29aa7..9b70f5a74a 100644
--- a/docs/install/install-from-package/install-webserver.rst
+++ b/docs/install/install-from-package/install-webserver.rst
@@ -67,8 +67,7 @@ would be:

    [ui]
    brand = "Backend.AI"
-   default_environment = "cr.backend.ai/stable/python"
-   default_import_environment = "cr.backend.ai/filebrowser:21.02-ubuntu20.04"
+   menu_blocklist = "pipeline"

    [api]
    domain = "default"
@@ -89,6 +88,65 @@ would be:

    [license]

+   [webserver]
+
+   [logging]
+   # One of: "NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"
+   # Set the global logging level.
+   level = "INFO"
+
+   # Multi-choice of: "console", "logstash", "file"
+   # For each choice, there must be a "logging.<driver>" section
+   # in this config file as exemplified below.
+   drivers = ["console", "file"]
+
+   [logging.console]
+   # If set true, use ANSI colors if the console is a terminal.
+   # If set false, always disable the colored output in console logs.
+   colored = true
+
+   # One of: "simple", "verbose"
+   format = "verbose"
+
+   [logging.file]
+   # The log file path and filename pattern.
+   # All messages are wrapped in single-line JSON objects.
+   # Rotated logs may have additional suffixes.
+   # For production, "/var/log/backend.ai" is recommended.
+   path = "./logs"
+   filename = "webserver.log"
+
+   # The maximum number of rotated log files to keep.
+   # The oldest log files are deleted when there are more backups than this number.
+   backup-count = 10
+
+   # The log file size to begin rotation.
+   rotation-size = "10M"
+
+   [logging.logstash]
+   # The endpoint to publish logstash records.
+   endpoint = { host = "localhost", port = 9300 }
+
+   # One of: "zmq.push", "zmq.pub", "tcp", "udp"
+   protocol = "tcp"
+
+   # SSL configs when protocol = "tcp"
+   ssl-enabled = true
+   ssl-verify = true
+
+   # Specify additional package namespaces to include in the logs
+   # and their individual log levels.
+   # Note that the actual logging level applied is the conjunction of the global logging level and the
+   # logging levels specified here for each namespace.
+   [logging.pkg-ns]
+   "" = "WARNING"
+   "aiotools" = "INFO"
+   "aiohttp" = "INFO"
+   "ai.backend" = "INFO"
+
+   [debug]
+   enabled = false
+
    [plugin]

    [pipeline]
@@ -127,6 +185,9 @@ First, create a runner script at ``${HOME}/bin/run-webserver.sh``:
       export HOME="/home/bai"
    fi

+   # -- If you have installed using static python --
+   source .venv/bin/activate
+
    # -- If you have installed using pyenv --
    if [ -z "$PYENV_ROOT" ]; then
       export PYENV_ROOT="$HOME/.pyenv"
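If you follow the comment in ``[logging.file]`` above and point ``path`` at
``/var/log/backend.ai`` for production, the directory has to exist and be writable by the
account running the service. A minimal sketch, assuming the services run as the ``bai``
user shown in the runner scripts:

.. code-block:: console

   $ sudo mkdir -p /var/log/backend.ai
   $ sudo chown bai:bai /var/log/backend.ai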
diff --git a/docs/install/install-from-package/os-preparation.rst b/docs/install/install-from-package/os-preparation.rst
index c7385ec2db..7133628054 100644
--- a/docs/install/install-from-package/os-preparation.rst
+++ b/docs/install/install-from-package/os-preparation.rst
@@ -113,11 +113,51 @@
 information on the Python version compatibility can be found at `here `_.

 There can be several ways to prepare a specific Python version. Here, we will be
-using pyenv and pyenv-virtualenv.
+using a standalone static built Python.
+
+Use a standalone static built Python (Recommended)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Obtain a distribution of `a standalone static built Python `_ that matches the
+required Python version, target machine architecture, and so on. Then extract
+the distribution to a directory of your choice.
+
+.. code-block:: console
+
+   $ curl -L "https://github.com/indygreg/python-build-standalone/releases/download/${PYTHON_RELEASE_DATE}/cpython-${PYTHON_VERSION}+${PYTHON_RELEASE_DATE}-${TARGET_MACHINE_ARCHITECTURE}-${ARCHIVE_FLAVOR}.tar.gz" > cpython-${PYTHON_VERSION}+${PYTHON_RELEASE_DATE}-${TARGET_MACHINE_ARCHITECTURE}-${ARCHIVE_FLAVOR}.tar.gz
+   $ tar -xf "cpython-${PYTHON_VERSION}+${PYTHON_RELEASE_DATE}-${TARGET_MACHINE_ARCHITECTURE}-${ARCHIVE_FLAVOR}.tar.gz"
+   $ mkdir -p "/home/${USERNAME}/.static-python/versions"
+   $ mv python "/home/${USERNAME}/.static-python/versions/${PYTHON_VERSION}"
+
+For example,
+
+.. code-block:: console
+
+   $ curl -L "https://github.com/indygreg/python-build-standalone/releases/download/20231002/cpython-3.11.6+20231002-x86_64-unknown-linux-gnu-install_only.tar.gz" > cpython-3.11.6+20231002-x86_64-unknown-linux-gnu-install_only.tar.gz
+   $ tar -xf "cpython-3.11.6+20231002-x86_64-unknown-linux-gnu-install_only.tar.gz"
+   $ mkdir -p "/home/bai/.static-python/versions"
+   $ mv python "/home/bai/.static-python/versions/3.11.6"
+
+Then, you can create a separate virtual environment for each service. To create a
+virtual environment for Backend.AI Manager and activate it, for example, you may run:
+
+.. code-block:: console
+
+   $ mkdir "${HOME}/manager"
+   $ cd "${HOME}/manager"
+   $ ~/.static-python/versions/3.11.6/bin/python3 -m venv .venv
+   $ source .venv/bin/activate
+   $ pip install -U pip setuptools wheel
+
+You also need to make ``pip`` available to the Python installation with the
+latest ``wheel`` and ``setuptools`` packages, so that any non-binary extension
+packages can be compiled and installed on your system.

-Use pyenv to manually build and select a specific Python version
+
+
+(Alternative) Use pyenv to manually build and select a specific Python version
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+If you prefer, you may use pyenv and pyenv-virtualenv instead.
 Install `pyenv `_ and
 `pyenv-virtualenv `_. Then, install
@@ -150,14 +190,6 @@ latest ``wheel`` and ``setuptools`` packages, so that any non-binary extension
 packages can be compiled and installed on your system.


-Use a standalone static built Python
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-We can `use a standalone static built Python `_.
-
-.. warning:: Details will be added later.
-
-
 Configure network aliases
 -------------------------
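Whichever Python preparation method you choose, it helps to confirm that the virtual
environment resolves to the intended interpreter before installing any Backend.AI
packages. A quick check, assuming the example paths above:

.. code-block:: console

   $ source ${HOME}/manager/.venv/bin/activate
   $ python --version                                 # e.g. Python 3.11.6
   $ python -c "import sys; print(sys.executable)"    # should point inside .venv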
diff --git a/docs/install/install-from-package/prepare-cache-service.rst b/docs/install/install-from-package/prepare-cache-service.rst
index 5acfdb1fda..5284bfdc05 100644
--- a/docs/install/install-from-package/prepare-cache-service.rst
+++ b/docs/install/install-from-package/prepare-cache-service.rst
@@ -3,7 +3,7 @@ Prepare Cache Service

 Backend.AI makes use of Redis as its main cache service. Launch the service
 using docker compose by generating the file
-``$HOME/halfstack/docker-compose.hs.redis.yaml`` and populating it with the
+``$HOME/halfstack/redis-cluster-default/docker-compose.yaml`` and populating it with the
 following YAML. Feel free to adjust the volume paths and port settings. Please
 refer `the latest configuration `_
@@ -50,9 +50,9 @@ Execute the following command to start the service container. The project

 .. code-block:: console

-   $ cd ${HOME}/halfstack
-   $ docker compose -f docker-compose.hs.redis.yaml -p ${USER} up -d
+   $ cd ${HOME}/halfstack/redis-cluster-default
+   $ docker compose up -d
    $ # -- To terminate the container:
-   $ # docker compose -f docker-compose.hs.redis.yaml -p ${USER} down
+   $ # docker compose down
    $ # -- To see the container logs:
-   $ # docker compose -f docker-compose.hs.redis.yaml -p ${USER} logs -f
+   $ # docker compose logs -f
diff --git a/docs/install/install-from-package/prepare-config-service.rst b/docs/install/install-from-package/prepare-config-service.rst
index b5eda581ad..4dee7a07d6 100644
--- a/docs/install/install-from-package/prepare-config-service.rst
+++ b/docs/install/install-from-package/prepare-config-service.rst
@@ -3,7 +3,7 @@ Prepare Config Service

 Backend.AI makes use of Etcd as its main config service. Launch the service
 using docker compose by generating the file
-``$HOME/halfstack/docker-compose.hs.etcd.yaml`` and populating it with the
+``$HOME/halfstack/etcd-cluster-default/docker-compose.yaml`` and populating it with the
 following YAML. Feel free to adjust the volume paths and port settings. Please
 refer `the latest configuration `_
@@ -59,9 +59,9 @@ Execute the following command to start the service container. The project

 .. code-block:: console

-   $ cd ${HOME}/halfstack
-   $ docker compose -f docker-compose.hs.etcd.yaml -p ${USER} up -d
+   $ cd ${HOME}/halfstack/etcd-cluster-default
+   $ docker compose up -d
    $ # -- To terminate the container:
-   $ # docker compose -f docker-compose.hs.etcd.yaml -p ${USER} down
+   $ # docker compose down
    $ # -- To see the container logs:
-   $ # docker compose -f docker-compose.hs.etcd.yaml -p ${USER} logs -f
+   $ # docker compose logs -f
diff --git a/docs/install/install-from-package/prepare-database.rst b/docs/install/install-from-package/prepare-database.rst
index 43ca4902cd..2d4d6afbfb 100644
--- a/docs/install/install-from-package/prepare-database.rst
+++ b/docs/install/install-from-package/prepare-database.rst
@@ -3,7 +3,7 @@ Prepare Database

 Backend.AI makes use of PostgreSQL as its main database. Launch the service
 using docker compose by generating the file
-``$HOME/halfstack/docker-compose.hs.postgres.yaml`` and populating it with the
+``$HOME/halfstack/postgres-cluster-default/docker-compose.yaml`` and populating it with the
 following YAML. Feel free to adjust the volume paths and port settings. Please
 refer `the latest configuration `_
@@ -22,7 +22,7 @@ refer
    services:
      backendai-pg-active:
        <<: *base
-       image: postgres:13.2-alpine
+       image: postgres:15.1-alpine
        restart: unless-stopped
        command: >
          postgres
@@ -58,9 +58,9 @@ Execute the following command to start the service container. The project

 .. code-block:: console

-   $ cd ${HOME}/halfstack
-   $ docker compose -f docker-compose.hs.postgres.yaml -p ${USER} up -d
+   $ cd ${HOME}/halfstack/postgres-cluster-default
+   $ docker compose up -d
    $ # -- To terminate the container:
-   $ # docker compose -f docker-compose.hs.postgres.yaml -p ${USER} down
+   $ # docker compose down
    $ # -- To see the container logs:
-   $ # docker compose -f docker-compose.hs.postgres.yaml -p ${USER} logs -f
+   $ # docker compose logs -f
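Once the halfstack containers are up, each compose project can be checked in place. For
the database, for example, the following sketch assumes the ``backendai-pg-active``
service name from the YAML above:

.. code-block:: console

   $ cd ${HOME}/halfstack/postgres-cluster-default
   $ docker compose ps
   $ docker compose exec backendai-pg-active pg_isready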