diff --git a/.github/workflows/tunasync.yml b/.github/workflows/tunasync.yml index beb9ff6..4b38077 100644 --- a/.github/workflows/tunasync.yml +++ b/.github/workflows/tunasync.yml @@ -9,10 +9,10 @@ jobs: runs-on: ubuntu-latest steps: - - name: Set up Go 1.13 + - name: Set up Go 1.16 uses: actions/setup-go@v1 with: - go-version: 1.13 + go-version: 1.16 id: go - name: Check out code into the Go module directory @@ -37,6 +37,11 @@ jobs: test: name: Test runs-on: ubuntu-latest + services: + registry: + image: registry:2 + ports: + - 5000:5000 steps: - name: Setup test dependencies @@ -48,22 +53,184 @@ jobs: sudo cgcreate -a $USER -t $USER -g cpu:tunasync sudo cgcreate -a $USER -t $USER -g memory:tunasync - - name: Set up Go 1.13 + - name: Set up Go 1.16 uses: actions/setup-go@v1 with: - go-version: 1.13 + go-version: 1.16 id: go - name: Check out code into the Go module directory uses: actions/checkout@v2 - name: Run Unit tests. - run: make test + run: | + go install github.com/wadey/gocovmerge@latest + TERM=xterm-256color make test + + - name: Run Additional Unit tests. 
+ run: | + make build-test-worker + sudo cgexec -g "*:/" bash -c "echo 0 > /sys/fs/cgroup/systemd/tasks; exec sudo -u $USER env USECURCGROUP=1 TERM=xterm-256color cgexec -g cpu,memory:tunasync ./worker.test -test.v=true -test.coverprofile profile2.cov -test.run TestCgroup" + touch /tmp/dummy_exec + chmod +x /tmp/dummy_exec + run_test_reexec (){ + case="$1" + shift + argv0="$1" + shift + (TESTREEXEC="$case" TERM=xterm-256color exec -a "$argv0" ./worker.test -test.v=true -test.coverprofile "profile5_$case.cov" -test.run TestReexec -- "$@") + } + run_test_reexec 1 tunasync-exec __dummy__ + run_test_reexec 2 tunasync-exec /tmp/dummy_exec + run_test_reexec 3 tunasync-exec /tmp/dummy_exec 3< <(echo -n "abrt") + run_test_reexec 4 tunasync-exec /tmp/dummy_exec 3< <(echo -n "cont") + run_test_reexec 5 tunasync-exec2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + with: + driver-opts: network=host + - name: Cache Docker layers + uses: actions/cache@v2 + if: github.event_name == 'push' + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx- + - name: Cache Docker layers + uses: actions/cache@v2 + if: github.event_name == 'pull_request' + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-pr-${{ github.event.pull_request.head.user.login }}-buildx-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-pr-${{ github.event.pull_request.head.user.login }}-buildx- + ${{ runner.os }}-buildx- + - name: Cache Docker layers + if: github.event_name != 'push' && github.event_name != 'pull_request' + run: | + echo "I do not know how to setup cache" + exit -1 + + - name: Prepare cache directory + run: | + mkdir -p /tmp/.buildx-cache + + - name: Build Docker image for uml rootfs + uses: docker/build-push-action@v2 + with: + context: .umlrootfs + file: .umlrootfs/Dockerfile + push: true + tags: localhost:5000/umlrootfs + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: 
type=local,dest=/tmp/.buildx-cache + + - name: Fetch and install uml package + run: | + sudo apt-get update + sudo apt-get install -y debian-archive-keyring + sudo ln -sf /usr/share/keyrings/debian-archive-keyring.gpg /etc/apt/trusted.gpg.d/ + echo "deb http://deb.debian.org/debian buster main" | sudo tee /etc/apt/sources.list.d/buster.list + sudo apt-get update + apt-get download user-mode-linux/buster + sudo rm /etc/apt/sources.list.d/buster.list + sudo apt-get update + sudo mv user-mode-linux_*.deb /tmp/uml.deb + sudo apt-get install --no-install-recommends -y /tmp/uml.deb + sudo rm /tmp/uml.deb + sudo apt-get install --no-install-recommends -y rsh-redone-client + + - name: Prepare uml environment + run: | + docker container create --name umlrootfs localhost:5000/umlrootfs + sudo mkdir -p umlrootfs + docker container export umlrootfs | sudo tar -xv -C umlrootfs + docker container rm umlrootfs + sudo cp -a --target-directory=umlrootfs/lib/ /usr/lib/uml/modules + /bin/echo -e "127.0.0.1 localhost\n254.255.255.1 host" | sudo tee umlrootfs/etc/hosts + sudo ip tuntap add dev umltap mode tap + sudo ip addr add 254.255.255.1/24 dev umltap + sudo ip link set umltap up + + - name: Start Uml + run: | + start_uml () { + sudo bash -c 'linux root=/dev/root rootflags=/ rw rootfstype=hostfs mem=2G eth0=tuntap,umltap hostfs="$PWD/umlrootfs" con1=pts systemd.unified_cgroup_hierarchy=1 & pid=$!; echo "UMLINUX_PID=$pid" >> '"$GITHUB_ENV" + } + ( start_uml ) + started=0 + for i in $(seq 1 60); do + if ping -c 1 -w 1 254.255.255.2; then + started=1 + break + fi + done + if [ "$started" != "1" ]; then + echo "Failed to wait Umlinux online" + exit 1 + fi + + - name: Prepare Uml Environment + run: | + CUSER="$(id --user --name)" + CUID="$(id --user)" + CGID="$(id --group)" + sudo chroot umlrootfs bash --noprofile --norc -eo pipefail << EOF + groupadd --gid "${CGID?}" "${CUSER?}" + useradd --create-home --home-dir "/home/${CUSER}" --gid "${CGID?}" \ + --uid "${CUID?}" --shell "\$(which 
bash)" "${CUSER?}" + EOF + ln ./worker.test "umlrootfs/home/${CUSER}/worker.test" + + - name: Run Tests in Cgroupv2 + run: | + CUSER="$(id --user --name)" + sudo rsh 254.255.255.2 bash --noprofile --norc -eo pipefail << EOF + cd "/home/${CUSER}" + mkdir -p /sys/fs/cgroup/tunasync + TERM=xterm-256color ./worker.test -test.v=true -test.coverprofile \ + profile3.cov -test.run TestCgroup + rmdir /sys/fs/cgroup/tunasync + systemd-run --service-type=oneshot --uid="${CUSER}" --pipe --wait \ + --property=Delegate=yes --setenv=USECURCGROUP=1 \ + --setenv=TERM=xterm-256color --same-dir \ + "\${PWD}/worker.test" -test.v=true -test.coverprofile \ + profile4.cov -test.run TestCgroup + EOF + + - name: Stop Uml + run: | + sudo rsh 254.255.255.2 systemctl poweroff + sleep 10 + if [ -e "/proc/$UMLINUX_PID" ]; then + sleep 10 + if [ -e "/proc/$UMLINUX_PID" ]; then + sudo kill -TERM "$UMLINUX_PID" || true + sleep 1 + fi + fi + if [ -e "/proc/$UMLINUX_PID" ]; then + sleep 10 + if [ -e "/proc/$UMLINUX_PID" ]; then + sudo kill -KILL "$UMLINUX_PID" || true + sleep 1 + fi + fi + + - name: Combine coverage files + run : | + CUSER="$(id --user --name)" + "${HOME}/go/bin/gocovmerge" profile.cov profile2.cov \ + "umlrootfs/home/${CUSER}/profile3.cov" \ + "umlrootfs/home/${CUSER}/profile4.cov" \ + profile5_*.cov > profile-all.cov - name: Convert coverage to lcov uses: jandelgado/gcov2lcov-action@v1.0.0 with: - infile: profile.cov + infile: profile-all.cov outfile: coverage.lcov - name: Coveralls diff --git a/.umlrootfs/Dockerfile b/.umlrootfs/Dockerfile new file mode 100644 index 0000000..a5878bc --- /dev/null +++ b/.umlrootfs/Dockerfile @@ -0,0 +1,13 @@ +FROM debian:buster +RUN apt-get update && apt-get install -y systemd rsh-redone-server ifupdown sudo kmod +RUN echo "host" > /root/.rhosts && \ + chmod 600 /root/.rhosts && \ + /bin/echo -e "auto eth0\niface eth0 inet static\naddress 254.255.255.2/24" > /etc/network/interfaces.d/eth0 && \ + sed -i '/pam_securetty/d' /etc/pam.d/rlogin && \ + 
cp /usr/share/systemd/tmp.mount /etc/systemd/system && \ + systemctl enable tmp.mount + +RUN echo "deb http://deb.debian.org/debian experimental main" >> /etc/apt/sources.list && \ + apt-get update && \ + apt-get install -y make && \ + apt-get install -y -t experimental cgroup-tools diff --git a/Makefile b/Makefile index f495a9f..f01f852 100644 --- a/Makefile +++ b/Makefile @@ -19,4 +19,7 @@ $(BUILDBIN:%=build-$(ARCH)/%) : build-$(ARCH)/% : cmd/% test: go test -v -covermode=count -coverprofile=profile.cov ./... -.PHONY: all test $(BUILDBIN) +build-test-worker: + go test -c -covermode=count ./worker + +.PHONY: all test $(BUILDBIN) build-test-worker diff --git a/cmd/tunasync/tunasync.go b/cmd/tunasync/tunasync.go index 7db4485..a762e34 100644 --- a/cmd/tunasync/tunasync.go +++ b/cmd/tunasync/tunasync.go @@ -12,6 +12,7 @@ import ( "github.com/pkg/profile" "gopkg.in/op/go-logging.v1" "github.com/urfave/cli" + "github.com/moby/moby/pkg/reexec" tunasync "github.com/tuna/tunasync/internal" "github.com/tuna/tunasync/manager" @@ -109,6 +110,10 @@ func startWorker(c *cli.Context) error { func main() { + if reexec.Init() { + return + } + cli.VersionPrinter = func(c *cli.Context) { var builddate string if buildstamp == "" { diff --git a/docs/cgroup.md b/docs/cgroup.md new file mode 100644 index 0000000..0771d2a --- /dev/null +++ b/docs/cgroup.md @@ -0,0 +1,141 @@ +# About Tunasync and cgroup + +Optionally, tunasync can be integrated with cgroup to have better control and tracking processes started by mirror jobs. Also, limiting memory usage of a mirror job also requires cgroup support. + +## How cgroup are utilized in tunasync? + +If cgroup are enabled globally, all the mirror jobs, except those running in docker containers, are run in separate cgroups. If `mem_limit` is specified, it will be applied to the cgroup. For jobs running in docker containers, `mem_limit` is applied via `docker run` command. + + +## Tl;dr: What's the recommended configuration? 
+ +### If you are using v1 (legacy, hybrid) cgroup hierarchy: + +`tunasync-worker.service`: + +``` +[Unit] +Description = TUNA mirrors sync worker +After=network.target + +[Service] +Type=simple +User=tunasync +PermissionsStartOnly=true +ExecStartPre=/usr/bin/cgcreate -t tunasync -a tunasync -g memory:tunasync +ExecStart=/home/bin/tunasync worker -c /etc/tunasync/worker.conf --with-systemd +ExecReload=/bin/kill -SIGHUP $MAINPID +ExecStopPost=/usr/bin/cgdelete memory:tunasync + +[Install] +WantedBy=multi-user.target +``` + +`worker.conf`: + +``` toml +[cgroup] +enable = true +group = "tunasync" +``` + +### If you are using v2 (unified) cgroup hierarchy: + +`tunasync-worker.service`: + +``` +[Unit] +Description = TUNA mirrors sync worker +After=network.target + +[Service] +Type=simple +User=tunasync +ExecStart=/home/bin/tunasync worker -c /etc/tunasync/worker.conf --with-systemd +ExecReload=/bin/kill -SIGHUP $MAINPID +Delegate=yes + +[Install] +WantedBy=multi-user.target +``` + +`worker.conf`: + +``` toml +[cgroup] +enable = true +``` + + +## Two versions of cgroups + +For various reasons, there are two versions of cgroups in the kernel, which are incompatible with each other. Most current Linux distributions adopt systemd as the init system, which relies on cgroup and is responsible for initializing cgroup. As a result, the selection of the version of cgroups is mainly decided by systemd. Since version 243, the "unified" cgroup hierarchy setup has become the default. + +Tunasync can automatically detect which version of cgroup is in use and enable the corresponding operating interface, but due to the fact that systemd behaves slightly differently in the two cases, different configurations for tunasync are recommended. + +## Two modes of group name discovery + +Two modes of group name discovery are provided: implicit mode and manual mode. + +### Manual Mode + +In this mode, the administrator should 1. 
manually create an empty cgroup (for cgroup v2 unified hierarchy) or empty cgroups in certain controller subsystems with the same name (for cgroup v1 hybrid hierarchy); 2. change the ownership of the cgroups to the running user of the tunasync worker; and 3. specify the path in the configuration. On start, tunasync will automatically detect which controllers are enabled (for v1) or enable needed controllers (for v2). + +Example 1: + +``` bash +# suppose we have cgroup v1 +sudo mkdir -p /sys/fs/cgroup/cpu/test/tunasync +sudo mkdir -p /sys/fs/cgroup/memory/test/tunasync +sudo chown -R tunasync:tunasync /sys/fs/cgroup/cpu/test/tunasync +sudo chown -R tunasync:tunasync /sys/fs/cgroup/memory/test/tunasync + +# in worker.conf, we have group = "/test/tunasync" or "test/tunasync" +tunasync worker -c /path/to/worker.conf +``` + +In the above scenario, tunasync will detect the enabled subsystem controllers are cpu and memory. When running a mirror job named `foo`, sub-cgroups will be created in both `/sys/fs/cgroup/cpu/test/tunasync/foo` and `/sys/fs/cgroup/memory/test/tunasync/foo`. + +Example 2 (not recommended): + +``` bash +# suppose we have cgroup v2 +sudo mkdir -p /sys/fs/cgroup/test/tunasync +sudo chown -R tunasync:tunasync /sys/fs/cgroup/test/tunasync + +# in worker.conf, we have group = "/test/tunasync" or "test/tunasync" +tunasync worker -c /path/to/worker.conf +``` + +In the above scenario, tunasync will directly use the cgroup `/sys/fs/cgroup/test/tunasync`. In most cases, due to the design of cgroupv2, since tunasync is not running as root, tunasync won't have the permission to move the processes it starts to the correct cgroup. That's because cgroup2 requires that the operating process also have the write permission of the common ancestor of the source group and the target group when moving processes between groups. So this example is only for demonstration of the functionality and you should avoid it. 
+ +### Implicit mode + +In this mode, tunasync will use the cgroup it is currently running in and create sub-groups for jobs in that group. Tunasync will first create a sub-group named `__worker` in that group, and move itself into the `__worker` sub-group, to prevent processes in non-leaf cgroups. + +Mostly, this mode is used together with the `Delegate=yes` option of the systemd service configuration of tunasync, which will permit the running process to self-manage the cgroup the service is running in. Due to security considerations, systemd won't give write permissions of the current running cgroups to the service when using v1 (legacy, hybrid) cgroup hierarchy and non-root user, so it is more meaningful to use this mode with v2 cgroup hierarchy. + + +## Configuration + +``` toml +[cgroup] +enable = true +base_path = "/sys/fs/cgroup" +group = "tunasync" +subsystem = "memory" +``` + +The definition of the above options is: + +* `enable`: `Bool`, specifies whether cgroup is enabled. When cgroup is disabled, `memory_limit` for non-docker jobs will be ignored, and the following options are also ignored. +* `group`: `String`, specifies the cgroup tunasync will use. When not provided, or provided with empty string, cgroup discovery will work in "Implicit mode", i.e. will create sub-cgroups in the current running cgroup. Otherwise, cgroup discovery will work in "Manual mode", where tunasync will create sub-cgroups in the specified cgroup. +* `base_path`: `String`, ignored. It originally specifies the mounting path of cgroup filesystem, but for making everything work, it is now required that the cgroup filesystem should be mounted at its default path (`/sys/fs/cgroup`). +* `subsystem`: `String`, ignored. It originally specifies which cgroupv1 controller is enabled and now becomes meaningless since the discovery is now automatic. 
+ +## References: + +* [https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html]() +* [https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/index.html]() +* [https://systemd.io/CGROUP_DELEGATION/]() +* [https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Delegate=]() diff --git a/docs/zh_CN/workers.conf b/docs/zh_CN/workers.conf index a9586c6..2a0c67d 100644 --- a/docs/zh_CN/workers.conf +++ b/docs/zh_CN/workers.conf @@ -5,7 +5,7 @@ name = "mirror_worker" log_dir = "/srv/tunasync/log/tunasync/{{.Name}}" mirror_dir = "/srv/tunasync" concurrent = 10 -interval = 1 +interval = 120 # ensure the exec user be add into `docker` group [docker] @@ -176,7 +176,7 @@ command = "/home/scripts/pub.sh" interval = 30 docker_image = "tunathu/pub-mirror:latest" - [mirrors.env] + [mirrors.env] MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/dart-pub" [[mirrors]] @@ -604,7 +604,7 @@ docker_volumes = [ ] docker_options = [ ] - [mirrors.env] + [mirrors.env] MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/rustup" [[mirrors]] @@ -710,7 +710,7 @@ command = "/home/tunasync-scripts/aosp.sh" upstream = "https://android.googlesource.com/mirror/manifest" size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)" docker_image = "tunathu/tunasync-scripts:latest" - [mirrors.env] + [mirrors.env] REPO = "/usr/local/bin/aosp-repo" REPO_URL = "https://mirrors.tuna.tsinghua.edu.cn/git/git-repo" USE_BITMAP_INDEX = "1" @@ -737,7 +737,7 @@ upstream = "https://chromium.googlesource.com" size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)" fail_on_match = "fatal: " docker_image = "tunathu/tunasync-scripts:latest" - [mirrors.env] + [mirrors.env] USE_BITMAP_INDEX = "1" CONCURRENT_JOBS = "20" diff --git a/go.mod b/go.mod index 6ff93bb..27f7ca7 100644 --- a/go.mod +++ b/go.mod @@ -8,21 +8,31 @@ require ( github.com/alicebob/miniredis v2.5.0+incompatible github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 github.com/boltdb/bolt v1.3.1 + 
github.com/cilium/ebpf v0.6.2 // indirect github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27 + github.com/containerd/cgroups v1.0.2-0.20210729163027-ddda8a174e9a github.com/dennwc/btrfs v0.0.0-20190517175702-d917b30ff035 github.com/dgraph-io/badger/v2 v2.2007.2 + github.com/docker/go-units v0.4.0 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect github.com/gin-gonic/gin v1.7.0 github.com/go-redis/redis/v8 v8.3.0 github.com/gomodule/redigo v1.8.2 // indirect github.com/imdario/mergo v0.3.9 + github.com/moby/moby v20.10.7+incompatible + github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 github.com/pkg/errors v0.9.1 github.com/pkg/profile v1.4.0 github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 + github.com/sirupsen/logrus v1.8.1 // indirect + github.com/smartystreets/assertions v1.2.0 // indirect github.com/smartystreets/goconvey v1.6.4 github.com/syndtr/goleveldb v1.0.0 github.com/urfave/cli v1.22.3 github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb // indirect - golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 + golang.org/x/net v0.0.0-20201224014010-6772e930b67b // indirect + golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 + google.golang.org/protobuf v1.26.0 // indirect gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 + gotest.tools/v3 v3.0.3 // indirect ) diff --git a/go.sum b/go.sum index c524425..f1b8840 100644 --- a/go.sum +++ b/go.sum @@ -20,17 +20,25 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf 
v0.6.2 h1:iHsfF/t4aW4heW2YKfeHrVPGdtYTL4C4KocpM8KTSnI= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27 h1:HHUr4P/aKh4quafGxDT9LDasjGdlGkzLbfmmrlng3kA= github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE= +github.com/containerd/cgroups v1.0.2-0.20210729163027-ddda8a174e9a h1:Se756mbFRj+3RITm/9NYHknEo1TJEpCV8jHI2e8QOEo= +github.com/containerd/cgroups v1.0.2-0.20210729163027-ddda8a174e9a/go.mod h1:M9MzGh4G4yzSq0e3Bf6tQCoDsvGewJdfhIix9CRaOWo= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -46,10 +54,14 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczC github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -67,6 +79,10 @@ github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7a github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= github.com/go-redis/redis/v8 v8.3.0 h1:Xrwvn8+QqUYD1MbQmda3cVR2U9li5XbtRFkKZN5Y0hk= 
github.com/go-redis/redis/v8 v8.3.0/go.mod h1:a2xkpBM7NJUN5V5kiF46X5Ltx4WeXJ9757X/ScKUBdE= +github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= @@ -75,8 +91,9 @@ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -85,8 +102,10 @@ github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUz github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -99,8 +118,11 @@ github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGn github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ 
-111,6 +133,8 @@ github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHX github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/moby v20.10.7+incompatible h1:mMDsIjUeon2FpxCJz0Xj32wzRcTbGLVzG1uEbPalok4= +github.com/moby/moby v20.10.7+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= @@ -127,6 +151,9 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -143,8 +170,12 @@ github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a 
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -170,24 +201,36 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.3 h1:FpNT6zq26xNpHZy08emi755QwzLPs6Pukqjlc7RfOMU= github.com/urfave/cli v1.22.3/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/xordataexchange/crypt 
v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb h1:ZkM6LRnq40pR1Ox0hTHlnpkcOTuFIDQpZ1IN8rKKhX0= github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= go.opentelemetry.io/otel v0.13.0 h1:2isEnyzjjJZq6r2EKMsFj4TxiQiexsM04AVhwbR/oBA= go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -196,25 +239,41 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -231,3 
+290,5 @@ gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= diff --git a/internal/version.go b/internal/version.go index b71fdc0..9eac8b3 100644 --- a/internal/version.go +++ b/internal/version.go @@ -1,4 +1,4 @@ package internal // Version of the program -const Version string = "0.7.1" +const Version string = "0.8.0" diff --git a/worker/cgroup.go b/worker/cgroup.go index cf2b572..2f87bd5 100644 --- a/worker/cgroup.go +++ b/worker/cgroup.go @@ -1,64 +1,296 @@ package worker import ( - "bufio" "errors" "fmt" + "io/ioutil" "os" + "os/exec" "path/filepath" - "strconv" "syscall" "time" "golang.org/x/sys/unix" - "github.com/codeskyblue/go-sh" + "github.com/moby/moby/pkg/reexec" + cgv1 "github.com/containerd/cgroups" + cgv2 "github.com/containerd/cgroups/v2" + contspecs "github.com/opencontainers/runtime-spec/specs-go" ) type cgroupHook struct { emptyHook - basePath string - baseGroup string - created bool - subsystem string - memLimit string + cgCfg cgroupConfig + memLimit MemBytes + cgMgrV1 cgv1.Cgroup + cgMgrV2 *cgv2.Manager } -func newCgroupHook(p mirrorProvider, basePath, baseGroup, subsystem, memLimit string) *cgroupHook { - if basePath == "" { - basePath = "/sys/fs/cgroup" +type execCmd string + +const ( + cmdCont execCmd = "cont" + cmdAbrt execCmd = "abrt" +) + +func init () { + reexec.Register("tunasync-exec", waitExec) +} + +func waitExec () { + binary, err := exec.LookPath(os.Args[1]) + if err != nil { + panic(err) + } + + pipe := os.NewFile(3, "pipe") + if pipe != nil { + if _, err := pipe.Stat(); err == nil 
{ + cmdBytes, err := ioutil.ReadAll(pipe) + if err != nil { + panic(err) + } + if err := pipe.Close(); err != nil { + } + cmd := execCmd(string(cmdBytes)) + switch cmd { + case cmdAbrt: + fallthrough + default: + panic("Exited on request") + case cmdCont: + } + } } - if baseGroup == "" { - baseGroup = "tunasync" + + args := os.Args[1:] + env := os.Environ() + if err := syscall.Exec(binary, args, env); err != nil { + panic(err) } - if subsystem == "" { - subsystem = "cpu" + panic("Exec failed.") +} + +func initCgroup(cfg *cgroupConfig) (error) { + + logger.Debugf("Initializing cgroup") + baseGroup := cfg.Group + //subsystem := cfg.Subsystem + + // If baseGroup is empty, it implies using the cgroup of the current process + // otherwise, it refers to a absolute group path + if baseGroup != "" { + baseGroup = filepath.Join("/", baseGroup) + } + + cfg.isUnified = cgv1.Mode() == cgv1.Unified + + if cfg.isUnified { + logger.Debugf("Cgroup V2 detected") + g := baseGroup + if g == "" { + logger.Debugf("Detecting my cgroup path") + var err error + if g, err = cgv2.NestedGroupPath(""); err != nil { + return err + } + } + logger.Infof("Using cgroup path: %s", g) + + var err error + if cfg.cgMgrV2, err = cgv2.LoadManager("/sys/fs/cgroup", g); err != nil { + return err + } + if baseGroup == "" { + logger.Debugf("Creating a sub group and move all processes into it") + wkrMgr, err := cfg.cgMgrV2.NewChild("__worker", nil); + if err != nil { + return err + } + for { + logger.Debugf("Reading pids") + procs, err := cfg.cgMgrV2.Procs(false) + if err != nil { + logger.Errorf("Cannot read pids in that group") + return err + } + if len(procs) == 0 { + break + } + for _, p := range(procs) { + if err := wkrMgr.AddProc(p); err != nil{ + if errors.Is(err, syscall.ESRCH) { + logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring") + } else { + return err + } + } + } + } + } else { + logger.Debugf("Trying to create a sub group in that group") + testMgr, err := 
cfg.cgMgrV2.NewChild("__test", nil); + if err != nil { + logger.Errorf("Cannot create a sub group in the cgroup") + return err + } + if err := testMgr.Delete(); err != nil { + return err + } + procs, err := cfg.cgMgrV2.Procs(false) + if err != nil { + logger.Errorf("Cannot read pids in that group") + return err + } + if len(procs) != 0 { + return fmt.Errorf("There are remaining processes in cgroup %s", baseGroup) + } + } + } else { + logger.Debugf("Cgroup V1 detected") + var pather cgv1.Path + if baseGroup != "" { + pather = cgv1.StaticPath(baseGroup) + } else { + pather = (func(p cgv1.Path) (cgv1.Path){ + return func(subsys cgv1.Name) (string, error){ + path, err := p(subsys); + if err != nil { + return "", err + } + if path == "/" { + return "", cgv1.ErrControllerNotActive + } + return path, err + } + })(cgv1.NestedPath("")) + } + logger.Infof("Loading cgroup") + var err error + if cfg.cgMgrV1, err = cgv1.Load(cgv1.V1, pather, func(cfg *cgv1.InitConfig) error{ + cfg.InitCheck = cgv1.AllowAny + return nil + }); err != nil { + return err + } + logger.Debugf("Available subsystems:") + for _, subsys := range(cfg.cgMgrV1.Subsystems()) { + p, err := pather(subsys.Name()) + if err != nil { + return err + } + logger.Debugf("%s: %s", subsys.Name(), p) + } + if baseGroup == "" { + logger.Debugf("Creating a sub group and move all processes into it") + wkrMgr, err := cfg.cgMgrV1.New("__worker", &contspecs.LinuxResources{}); + if err != nil { + return err + } + for _, subsys := range(cfg.cgMgrV1.Subsystems()) { + logger.Debugf("Reading pids for subsystem %s", subsys.Name()) + for { + procs, err := cfg.cgMgrV1.Processes(subsys.Name(), false) + if err != nil { + p, err := pather(subsys.Name()) + if err != nil { + return err + } + logger.Errorf("Cannot read pids in group %s of subsystem %s", p, subsys.Name()) + return err + } + if len(procs) == 0 { + break + } + for _, proc := range(procs) { + if err := wkrMgr.Add(proc); err != nil { + if errors.Is(err, syscall.ESRCH) { + 
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring") + } else { + return err + } + } + } + } + } + } else { + logger.Debugf("Trying to create a sub group in that group") + testMgr, err := cfg.cgMgrV1.New("__test", &contspecs.LinuxResources{}); + if err != nil { + logger.Errorf("Cannot create a sub group in the cgroup") + return err + } + if err := testMgr.Delete(); err != nil { + return err + } + for _, subsys := range(cfg.cgMgrV1.Subsystems()) { + logger.Debugf("Reading pids for subsystem %s", subsys.Name()) + procs, err := cfg.cgMgrV1.Processes(subsys.Name(), false) + if err != nil { + p, err := pather(subsys.Name()) + if err != nil { + return err + } + logger.Errorf("Cannot read pids in group %s of subsystem %s", p, subsys.Name()) + return err + } + if len(procs) != 0 { + p, err := pather(subsys.Name()) + if err != nil { + return err + } + return fmt.Errorf("There are remaining processes in cgroup %s of subsystem %s", p, subsys.Name()) + } + } + } } + + return nil +} + +func newCgroupHook(p mirrorProvider, cfg cgroupConfig, memLimit MemBytes) *cgroupHook { return &cgroupHook{ emptyHook: emptyHook{ provider: p, }, - basePath: basePath, - baseGroup: baseGroup, - subsystem: subsystem, + cgCfg: cfg, + memLimit: memLimit, } } func (c *cgroupHook) preExec() error { - c.created = true - if err := sh.Command("cgcreate", "-g", c.Cgroup()).Run(); err != nil { - return err - } - if c.subsystem != "memory" { - return nil - } - if c.memLimit != "" { - gname := fmt.Sprintf("%s/%s", c.baseGroup, c.provider.Name()) - return sh.Command( - "cgset", "-r", - fmt.Sprintf("memory.limit_in_bytes=%s", c.memLimit), - gname, - ).Run() + if c.cgCfg.isUnified { + logger.Debugf("Creating v2 cgroup for task %s", c.provider.Name()) + var resSet *cgv2.Resources + if c.memLimit != 0 { + resSet = &cgv2.Resources { + Memory: &cgv2.Memory{ + Max: func(i int64) *int64 { return &i }(c.memLimit.Value()), + }, + } + } + subMgr, err := 
c.cgCfg.cgMgrV2.NewChild(c.provider.Name(), resSet) + if err != nil { + logger.Errorf("Failed to create cgroup for task %s: %s", c.provider.Name(), err.Error()) + return err + } + c.cgMgrV2 = subMgr + } else { + logger.Debugf("Creating v1 cgroup for task %s", c.provider.Name()) + var resSet contspecs.LinuxResources + if c.memLimit != 0 { + resSet = contspecs.LinuxResources { + Memory: &contspecs.LinuxMemory{ + Limit: func(i int64) *int64 { return &i }(c.memLimit.Value()), + }, + } + } + subMgr, err := c.cgCfg.cgMgrV1.New(c.provider.Name(), &resSet) + if err != nil { + logger.Errorf("Failed to create cgroup for task %s: %s", c.provider.Name(), err.Error()) + return err + } + c.cgMgrV1 = subMgr } return nil } @@ -69,36 +301,59 @@ func (c *cgroupHook) postExec() error { logger.Errorf("Error killing tasks: %s", err.Error()) } - c.created = false - return sh.Command("cgdelete", c.Cgroup()).Run() -} - -func (c *cgroupHook) Cgroup() string { - name := c.provider.Name() - return fmt.Sprintf("%s:%s/%s", c.subsystem, c.baseGroup, name) + if c.cgCfg.isUnified { + logger.Debugf("Deleting v2 cgroup for task %s", c.provider.Name()) + if err := c.cgMgrV2.Delete(); err != nil { + logger.Errorf("Failed to delete cgroup for task %s: %s", c.provider.Name(), err.Error()) + return err + } + c.cgMgrV2 = nil + } else { + logger.Debugf("Deleting v1 cgroup for task %s", c.provider.Name()) + if err := c.cgMgrV1.Delete(); err != nil { + logger.Errorf("Failed to delete cgroup for task %s: %s", c.provider.Name(), err.Error()) + return err + } + c.cgMgrV1 = nil + } + return nil } func (c *cgroupHook) killAll() error { - if !c.created { - return nil + if c.cgCfg.isUnified { + if c.cgMgrV2 == nil { + return nil + } + } else { + if c.cgMgrV1 == nil { + return nil + } } - name := c.provider.Name() readTaskList := func() ([]int, error) { taskList := []int{} - taskFile, err := os.Open(filepath.Join(c.basePath, c.subsystem, c.baseGroup, name, "tasks")) - if err != nil { - return taskList, err - } - 
defer taskFile.Close() - - scanner := bufio.NewScanner(taskFile) - for scanner.Scan() { - pid, err := strconv.Atoi(scanner.Text()) - if err != nil { - return taskList, err + if c.cgCfg.isUnified { + procs, err := c.cgMgrV2.Procs(false) + if (err != nil) { + return []int{}, err + } + for _, proc := range procs { + taskList = append(taskList, int(proc)) + } + } else { + taskSet := make(map[int]struct{}) + for _, subsys := range(c.cgMgrV1.Subsystems()) { + procs, err := c.cgMgrV1.Processes(subsys.Name(), false) + if err != nil { + return []int{}, err + } + for _, proc := range(procs) { + taskSet[proc.Pid] = struct{}{} + } + } + for proc := range(taskSet) { + taskList = append(taskList, proc) } - taskList = append(taskList, pid) } return taskList, nil } diff --git a/worker/cgroup_test.go b/worker/cgroup_test.go index 835ddd6..8adca9a 100644 --- a/worker/cgroup_test.go +++ b/worker/cgroup_test.go @@ -3,17 +3,101 @@ package worker import ( "io/ioutil" "os" + "os/exec" "path/filepath" "strconv" "strings" "testing" "time" + "errors" + "syscall" + cgv1 "github.com/containerd/cgroups" + cgv2 "github.com/containerd/cgroups/v2" + units "github.com/docker/go-units" + "github.com/moby/moby/pkg/reexec" . "github.com/smartystreets/goconvey/convey" ) +func init() { + _, testReexec := os.LookupEnv("TESTREEXEC") + if ! testReexec { + reexec.Init() + } +} + +func TestReexec(t *testing.T) { + testCase, testReexec := os.LookupEnv("TESTREEXEC") + if ! testReexec { + return + } + for len(os.Args) > 1 { + thisArg := os.Args[1] + os.Args = append([]string{os.Args[0]}, os.Args[2:]...) 
+ if thisArg == "--" { + break + } + } + switch testCase { + case "1": + Convey("Reexec should panic when command not found", t, func(ctx C){ + So(func(){ + reexec.Init() + }, ShouldPanicWith, exec.ErrNotFound) + }) + case "2": + Convey("Reexec should run when fd 3 is not open", t, func(ctx C){ + So((func() error{ + pipe := os.NewFile(3, "pipe") + if pipe == nil { + return errors.New("pipe is nil") + } else { + _, err := pipe.Stat() + return err + } + })(), ShouldNotBeNil) + So(func(){ + reexec.Init() + }, ShouldPanicWith, syscall.ENOEXEC) + }) + case "3": + Convey("Reexec should fail when fd 3 is sent with abrt cmd", t, func(ctx C){ + So(func(){ + reexec.Init() + }, ShouldPanicWith, "Exited on request") + }) + case "4": + Convey("Reexec should run when fd 3 is sent with cont cmd", t, func(ctx C){ + So(func(){ + reexec.Init() + }, ShouldPanicWith, syscall.ENOEXEC) + }) + case "5": + Convey("Reexec should not be triggered when argv[0] is not reexec", t, func(ctx C){ + So(func(){ + reexec.Init() + }, ShouldNotPanic) + }) + } +} + func TestCgroup(t *testing.T) { - Convey("Cgroup Should Work", t, func(ctx C) { + var cgcf *cgroupConfig + Convey("init cgroup", t, func(ctx C){ + _, useCurrentCgroup := os.LookupEnv("USECURCGROUP") + cgcf = &cgroupConfig{BasePath: "/sys/fs/cgroup", Group: "tunasync", Subsystem: "cpu"} + if useCurrentCgroup { + cgcf.Group = "" + } + err := initCgroup(cgcf) + So(err, ShouldBeNil) + if cgcf.isUnified { + So(cgcf.cgMgrV2, ShouldNotBeNil) + } else { + So(cgcf.cgMgrV1, ShouldNotBeNil) + } + + Convey("Cgroup Should Work", func(ctx C) { tmpDir, err := ioutil.TempDir("", "tunasync") defer os.RemoveAll(tmpDir) So(err, ShouldBeNil) @@ -45,13 +129,13 @@ redirect-std() { close-fds() { eval exec {3..255}\>\&- } - + # full daemonization of external command with setsid daemonize() { ( - redirect-std - cd / - close-fds + redirect-std + cd / + close-fds exec setsid "$@" ) & } @@ -72,14 +156,10 @@ sleep 30 provider, err := newCmdProvider(c) So(err, 
ShouldBeNil) - cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync", "cpu", "") + cg := newCgroupHook(provider, *cgcf, 0) provider.AddHook(cg) err = cg.preExec() - if err != nil { - logger.Errorf("Failed to create cgroup") - return - } So(err, ShouldBeNil) go func() { @@ -111,7 +191,7 @@ sleep 30 }) - Convey("Rsync Memory Should Be Limited", t, func() { + Convey("Rsync Memory Should Be Limited", func() { tmpDir, err := ioutil.TempDir("", "tunasync") defer os.RemoveAll(tmpDir) So(err, ShouldBeNil) @@ -132,19 +212,112 @@ sleep 30 provider, err := newRsyncProvider(c) So(err, ShouldBeNil) - cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync", "cpu", "512M") + cg := newCgroupHook(provider, *cgcf, 512 * units.MiB) provider.AddHook(cg) err = cg.preExec() - if err != nil { - logger.Errorf("Failed to create cgroup") - return - } - if cg.subsystem == "memory" { - memoLimit, err := ioutil.ReadFile(filepath.Join(cg.basePath, "memory", cg.baseGroup, provider.Name(), "memory.limit_in_bytes")) + So(err, ShouldBeNil) + if cgcf.isUnified { + cgpath := filepath.Join(cgcf.BasePath, cgcf.Group, provider.Name()) + if useCurrentCgroup { + group, err := cgv2.NestedGroupPath(filepath.Join("..", provider.Name())) + So(err, ShouldBeNil) + cgpath = filepath.Join(cgcf.BasePath, group) + } + memoLimit, err := ioutil.ReadFile(filepath.Join(cgpath, "memory.max")) So(err, ShouldBeNil) So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024)) + } else { + for _, subsys := range(cg.cgMgrV1.Subsystems()) { + if subsys.Name() == cgv1.Memory { + cgpath := filepath.Join(cgcf.Group, provider.Name()) + if useCurrentCgroup { + p, err := cgv1.NestedPath(filepath.Join("..", provider.Name()))(cgv1.Memory) + So(err, ShouldBeNil) + cgpath = p + } + memoLimit, err := ioutil.ReadFile(filepath.Join(cgcf.BasePath, "memory", cgpath, "memory.limit_in_bytes")) + So(err, ShouldBeNil) + So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024)) + } + } } 
cg.postExec() + So(cg.cgMgrV1, ShouldBeNil) + }) + Reset(func() { + if cgcf.isUnified { + if cgcf.Group == "" { + wkrg, err := cgv2.NestedGroupPath(""); + So(err, ShouldBeNil) + wkrMgr, err := cgv2.LoadManager("/sys/fs/cgroup", wkrg); + allCtrls, err := wkrMgr.Controllers() + So(err, ShouldBeNil) + err = wkrMgr.ToggleControllers(allCtrls, cgv2.Disable) + So(err, ShouldBeNil) + origMgr := cgcf.cgMgrV2 + for { + logger.Debugf("Restoring pids") + procs, err := wkrMgr.Procs(false) + So(err, ShouldBeNil) + if len(procs) == 0 { + break + } + for _, p := range(procs) { + if err := origMgr.AddProc(p); err != nil{ + if errors.Is(err, syscall.ESRCH) { + logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring") + } else { + So(err, ShouldBeNil) + } + } + } + } + err = wkrMgr.Delete() + So(err, ShouldBeNil) + } + } else { + if cgcf.Group == "" { + pather := (func(p cgv1.Path) (cgv1.Path){ + return func(subsys cgv1.Name) (string, error){ + path, err := p(subsys); + if err != nil { + return "", err + } + if path == "/" { + return "", cgv1.ErrControllerNotActive + } + return path, err + } + })(cgv1.NestedPath("")) + wkrMgr, err := cgv1.Load(cgv1.V1, pather, func(cfg *cgv1.InitConfig) error{ + cfg.InitCheck = cgv1.AllowAny + return nil + }) + So(err, ShouldBeNil) + origMgr := cgcf.cgMgrV1 + for _, subsys := range(wkrMgr.Subsystems()){ + for { + procs, err := wkrMgr.Processes(subsys.Name(), false) + So(err, ShouldBeNil) + if len(procs) == 0 { + break + } + for _, proc := range(procs) { + if err := origMgr.Add(proc); err != nil { + if errors.Is(err, syscall.ESRCH) { + logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring") + } else { + So(err, ShouldBeNil) + } + } + } + } + } + err = wkrMgr.Delete() + So(err, ShouldBeNil) + } + } + }) }) } diff --git a/worker/config.go b/worker/config.go index b9e13f4..c91cbc2 100644 --- a/worker/config.go +++ b/worker/config.go @@ -7,6 +7,9 @@ import ( "github.com/BurntSushi/toml" "github.com/imdario/mergo" 
+ units "github.com/docker/go-units" + cgv1 "github.com/containerd/cgroups" + cgv2 "github.com/containerd/cgroups/v2" ) type providerEnum uint8 @@ -87,6 +90,9 @@ type cgroupConfig struct { BasePath string `toml:"base_path"` Group string `toml:"group"` Subsystem string `toml:"subsystem"` + isUnified bool + cgMgrV1 cgv1.Cgroup + cgMgrV2 *cgv2.Manager } type dockerConfig struct { @@ -113,6 +119,32 @@ type includedMirrorConfig struct { Mirrors []mirrorConfig `toml:"mirrors"` } +type MemBytes int64 + +// Set sets the value of the MemBytes by passing a string +func (m *MemBytes) Set(value string) error { + val, err := units.RAMInBytes(value) + *m = MemBytes(val) + return err +} + +// Type returns the type +func (m *MemBytes) Type() string { + return "bytes" +} + +// Value returns the value in int64 +func (m *MemBytes) Value() int64 { + return int64(*m) +} + +// UnmarshalJSON is the customized unmarshaler for MemBytes +func (m *MemBytes) UnmarshalText(s []byte) error { + val, err := units.RAMInBytes(string(s)) + *m = MemBytes(val) + return err +} + type mirrorConfig struct { Name string `toml:"name"` Provider providerEnum `toml:"provider"` @@ -148,7 +180,7 @@ type mirrorConfig struct { RsyncOverride []string `toml:"rsync_override"` Stage1Profile string `toml:"stage1_profile"` - MemoryLimit string `toml:"memory_limit"` + MemoryLimit MemBytes `toml:"memory_limit"` DockerImage string `toml:"docker_image"` DockerVolumes []string `toml:"docker_volumes"` diff --git a/worker/config_test.go b/worker/config_test.go index ed054fc..c8de85d 100644 --- a/worker/config_test.go +++ b/worker/config_test.go @@ -7,6 +7,7 @@ import ( "path/filepath" "testing" "time" + units "github.com/docker/go-units" . 
"github.com/smartystreets/goconvey/convey" ) @@ -53,12 +54,15 @@ provider = "two-stage-rsync" stage1_profile = "debian" upstream = "rsync://ftp.debian.org/debian/" use_ipv6 = true +memory_limit = "256MiB" [[mirrors]] name = "fedora" provider = "rsync" upstream = "rsync://ftp.fedoraproject.org/fedora/" use_ipv6 = true +memory_limit = "128M" + exclude_file = "/etc/tunasync.d/fedora-exclude.txt" exec_on_failure = [ "bash -c 'echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status'" @@ -141,17 +145,20 @@ use_ipv6 = true So(m.Name, ShouldEqual, "debian") So(m.MirrorDir, ShouldEqual, "") So(m.Provider, ShouldEqual, provTwoStageRsync) + So(m.MemoryLimit.Value(), ShouldEqual, 256 * units.MiB) m = cfg.Mirrors[2] So(m.Name, ShouldEqual, "fedora") So(m.MirrorDir, ShouldEqual, "") So(m.Provider, ShouldEqual, provRsync) So(m.ExcludeFile, ShouldEqual, "/etc/tunasync.d/fedora-exclude.txt") + So(m.MemoryLimit.Value(), ShouldEqual, 128 * units.MiB) m = cfg.Mirrors[3] So(m.Name, ShouldEqual, "debian-cd") So(m.MirrorDir, ShouldEqual, "") So(m.Provider, ShouldEqual, provTwoStageRsync) + So(m.MemoryLimit.Value(), ShouldEqual, 0) m = cfg.Mirrors[4] So(m.Name, ShouldEqual, "debian-security") diff --git a/worker/docker.go b/worker/docker.go index b321ced..ae8cf77 100644 --- a/worker/docker.go +++ b/worker/docker.go @@ -13,6 +13,7 @@ type dockerHook struct { image string volumes []string options []string + memoryLimit MemBytes } func newDockerHook(p mirrorProvider, gCfg dockerConfig, mCfg mirrorConfig) *dockerHook { @@ -35,6 +36,7 @@ func newDockerHook(p mirrorProvider, gCfg dockerConfig, mCfg mirrorConfig) *dock image: mCfg.DockerImage, volumes: volumes, options: options, + memoryLimit: mCfg.MemoryLimit, } } diff --git a/worker/docker_test.go b/worker/docker_test.go index 2d1d849..0e5110b 100644 --- a/worker/docker_test.go +++ b/worker/docker_test.go @@ -8,6 +8,7 @@ import ( "path/filepath" "testing" "time" + units "github.com/docker/go-units" "github.com/codeskyblue/go-sh" 
. "github.com/smartystreets/goconvey/convey" @@ -77,6 +78,7 @@ sleep 20 volumes: []string{ fmt.Sprintf("%s:%s", cmdScript, "/bin/cmd.sh"), }, + memoryLimit: 512 * units.MiB, } provider.AddHook(d) So(provider.Docker(), ShouldNotBeNil) diff --git a/worker/provider.go b/worker/provider.go index cd89472..443c8f5 100644 --- a/worker/provider.go +++ b/worker/provider.go @@ -212,8 +212,7 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider { // Add Cgroup Hook provider.AddHook( newCgroupHook( - provider, cfg.Cgroup.BasePath, cfg.Cgroup.Group, - cfg.Cgroup.Subsystem, mirror.MemoryLimit, + provider, cfg.Cgroup, mirror.MemoryLimit, ), ) } diff --git a/worker/runner.go b/worker/runner.go index 01addcd..5f25a84 100644 --- a/worker/runner.go +++ b/worker/runner.go @@ -12,6 +12,8 @@ import ( "github.com/codeskyblue/go-sh" "golang.org/x/sys/unix" + "github.com/moby/moby/pkg/reexec" + cgv1 "github.com/containerd/cgroups" ) // runner is to run os commands giving command line, env and log file @@ -56,6 +58,10 @@ func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string, kv := fmt.Sprintf("%s=%s", k, v) args = append(args, "-e", kv) } + // set memlimit + if d.memoryLimit != 0 { + args = append(args, "-m", fmt.Sprint(d.memoryLimit.Value())) + } // apply options args = append(args, d.options...) // apply image and command @@ -66,10 +72,7 @@ func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string, cmd = exec.Command(c, args...) } else if provider.Cgroup() != nil { - c := "cgexec" - args := []string{"-g", provider.Cgroup().Cgroup()} - args = append(args, cmdAndArgs...) - cmd = exec.Command(c, args...) + cmd = reexec.Command(append([]string{"tunasync-exec"}, cmdAndArgs...)...) 
} else { if len(cmdAndArgs) == 1 { @@ -104,9 +107,59 @@ func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string, } func (c *cmdJob) Start() error { + cg := c.provider.Cgroup() + var ( + pipeR *os.File + pipeW *os.File + ) + if cg != nil { + logger.Debugf("Preparing cgroup sync pipes for job %s", c.provider.Name()) + var err error + pipeR, pipeW, err = os.Pipe(); + if err != nil { + return err + } + c.cmd.ExtraFiles = []*os.File{pipeR} + defer pipeR.Close() + defer pipeW.Close() + } + logger.Debugf("Command start: %v", c.cmd.Args) c.finished = make(chan empty, 1) - return c.cmd.Start() + + if err := c.cmd.Start(); err != nil { + return err + } + if cg != nil { + if err := pipeR.Close(); err != nil { + return err + } + if c.cmd == nil || c.cmd.Process == nil { + return errProcessNotStarted + } + pid := c.cmd.Process.Pid + if cg.cgCfg.isUnified { + if err := cg.cgMgrV2.AddProc(uint64(pid)); err != nil{ + if errors.Is(err, syscall.ESRCH) { + logger.Infof("Write pid %d to cgroup failed: process vanished, ignoring") + } else { + return err + } + } + } else { + if err := cg.cgMgrV1.Add(cgv1.Process{Pid: pid}); err != nil{ + if errors.Is(err, syscall.ESRCH) { + logger.Infof("Write pid %d to cgroup failed: process vanished, ignoring") + } else { + return err + } + } + } + if _, err := pipeW.WriteString(string(cmdCont)); err != nil { + return err + } + } + return nil } func (c *cmdJob) Wait() error { diff --git a/worker/worker.go b/worker/worker.go index 56e5f68..c288b23 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -54,6 +54,12 @@ func NewTUNASyncWorker(cfg *Config) *Worker { w.httpClient = httpClient } + if cfg.Cgroup.Enable { + if err := initCgroup(&cfg.Cgroup); err != nil { + logger.Errorf("Error initializing Cgroup: %s", err.Error()) + return nil + } + } w.initJobs() w.makeHTTPServer() return w