From 2c122e65219a82ff942fda7de5744345d62f1e48 Mon Sep 17 00:00:00 2001 From: Dorinda Bassey Date: Thu, 31 Oct 2024 15:19:36 +0100 Subject: [PATCH] vhost-device-gpu: Add Initial Implementation This program is a vhost-user backend daemon that provides VIRTIO GPU device emulation as specified in the VIRTIO Spec v.1.2 https://docs.oasis-open.org/virtio/virtio/v1.2/csd01/virtio-v1.2-csd01.html This crate utilizes the rutabaga crate from crosvm with some minor modification to rutabaga crate to fix compilation. This crate depends on this PR[rust-vmm/vhost#239] that implements support for QEMU's vhost-user-gpu protocol. This device uses the rutabaga_gfx crate to offer two rendering backends: 1. Virglrenderer: - Rutabaga translates OpenGL API and Vulkan calls to an intermediate representation and allows for OpenGL acceleration on the host. 2. Gfxstream: - GLES and Vulkan calls are forwarded to the host. These backends can be used by simply changing the `--gpu-mode` command line option. This crate also includes some modifications from libkrun virtio-gpu device https://github.com/containers/libkrun/tree/main/src/devices/src/virtio/gpu Fixes: rust-vmm#598 Co-authored-by: Dorinda Bassey Co-authored-by: Matej Hrica Signed-off-by: Dorinda Bassey Signed-off-by: Matej Hrica --- staging/Cargo.lock | 240 ++- staging/Cargo.toml | 1 + staging/coverage_config_x86_64.json | 2 +- staging/vhost-device-gpu/CHANGELOG.md | 10 + staging/vhost-device-gpu/Cargo.toml | 40 + staging/vhost-device-gpu/LICENSE-APACHE | 1 + staging/vhost-device-gpu/LICENSE-BSD-3-Clause | 1 + staging/vhost-device-gpu/README.md | 101 ++ staging/vhost-device-gpu/rustfmt.toml | 7 + staging/vhost-device-gpu/src/device.rs | 1607 +++++++++++++++++ staging/vhost-device-gpu/src/lib.rs | 101 ++ staging/vhost-device-gpu/src/main.rs | 121 ++ staging/vhost-device-gpu/src/protocol.rs | 1453 +++++++++++++++ staging/vhost-device-gpu/src/virtio_gpu.rs | 1011 +++++++++++ 14 files changed, 4680 insertions(+), 16 deletions(-) create mode 
100644 staging/vhost-device-gpu/CHANGELOG.md create mode 100644 staging/vhost-device-gpu/Cargo.toml create mode 100644 staging/vhost-device-gpu/LICENSE-APACHE create mode 100644 staging/vhost-device-gpu/LICENSE-BSD-3-Clause create mode 100644 staging/vhost-device-gpu/README.md create mode 100644 staging/vhost-device-gpu/rustfmt.toml create mode 100644 staging/vhost-device-gpu/src/device.rs create mode 100644 staging/vhost-device-gpu/src/lib.rs create mode 100644 staging/vhost-device-gpu/src/main.rs create mode 100644 staging/vhost-device-gpu/src/protocol.rs create mode 100644 staging/vhost-device-gpu/src/virtio_gpu.rs diff --git a/staging/Cargo.lock b/staging/Cargo.lock index 50c77e000..c0742b64a 100644 --- a/staging/Cargo.lock +++ b/staging/Cargo.lock @@ -114,11 +114,17 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "clap" -version = "4.5.19" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615" +checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" dependencies = [ "clap_builder", "clap_derive", @@ -126,9 +132,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.19" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b" +checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstream", "anstyle", @@ -198,6 +204,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "downcast" +version = "0.11.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + [[package]] name = "either" version = "1.13.0" @@ -298,6 +310,18 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "fragile" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" + [[package]] name = "futures" version = "0.3.31" @@ -532,6 +556,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] + [[package]] name = "mio" version = "0.8.11" @@ -544,6 +577,32 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "mockall" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a" +dependencies = [ + "cfg-if", + "downcast", + "fragile", + "mockall_derive", + "predicates", + "predicates-tree", +] + +[[package]] +name = "mockall_derive" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "nb" version = "1.1.0" @@ -584,7 +643,7 @@ dependencies = [ "bitflags 1.3.2", "cfg-if", "libc", - "memoffset", + "memoffset 0.7.1", "pin-utils", ] @@ -599,6 +658,19 @@ dependencies = [ 
"libc", ] +[[package]] +name = "nix" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" +dependencies = [ + "bitflags 2.6.0", + "cfg-if", + "cfg_aliases", + "libc", + "memoffset 0.9.1", +] + [[package]] name = "num_cpus" version = "1.16.0" @@ -671,13 +743,45 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkg-config" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" + [[package]] name = "ppv-lite86" version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ - "zerocopy", + "zerocopy 0.7.35", +] + +[[package]] +name = "predicates" +version = "3.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e9086cc7640c29a356d1a29fd134380bee9d8f79a17410aa76e7ad295f42c97" +dependencies = [ + "anstyle", + "predicates-core", +] + +[[package]] +name = "predicates-core" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae8177bee8e75d6846599c6b9ff679ed51e882816914eec639944d7c9aa11931" + +[[package]] +name = "predicates-tree" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41b740d195ed3166cd147c8047ec98db0e22ec019eb8eeb76d343b795304fb13" +dependencies = [ + "predicates-core", + "termtree", ] [[package]] @@ -691,9 +795,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = 
"b3e4daa0dcf6feba26f985457cdf104d4b4256fc5a09547140f3631bb076b19a" dependencies = [ "unicode-ident", ] @@ -704,6 +808,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1475abae4f8ad4998590fe3acfe20104f0a5d48fc420c817cd2c09c3f56151f0" +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quote" version = "1.0.37" @@ -787,6 +897,17 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" +[[package]] +name = "remain" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46aef80f842736de545ada6ec65b81ee91504efd6853f4b96de7414c42ae7443" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "rstest" version = "0.23.0" @@ -839,6 +960,36 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + +[[package]] +name = "rutabaga_gfx" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6628c6391bc654170f64fe8bfb7e5da3bf97f7ed3174c52c7a7349c012adec9c" +dependencies = [ + "anyhow", + "cfg-if", + "libc", + "log", + "nix 0.28.0", + "pkg-config", + "remain", + "thiserror", + "winapi", + "zerocopy 0.7.35", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -995,6 +1146,12 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "termtree" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" + [[package]] name = "thiserror" version = "1.0.64" @@ -1087,9 +1244,9 @@ dependencies = [ [[package]] name = "vhost" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c1c4c6c9f79fbe3150d9a403008ca416d34c489897effdda28b646f09900aad" +checksum = "79e9f0c62b0e4d5adbb7a9dc138b6003bdf823f196a927daf7ca0ae93cafd8ce" dependencies = [ "bitflags 2.6.0", "libc", @@ -1139,6 +1296,29 @@ dependencies = [ "vmm-sys-util", ] +[[package]] +name = "vhost-device-gpu" +version = "0.1.0" +dependencies = [ + "assert_matches", + "clap", + "env_logger 0.11.5", + "libc", + "log", + "mockall", + "rusty-fork", + "rutabaga_gfx", + "tempfile", + "thiserror", + "vhost", + "vhost-user-backend", + "virtio-bindings", + "virtio-queue", + "vm-memory", + "vmm-sys-util", + "zerocopy 0.6.6", +] + [[package]] name = "vhost-device-video" version = "0.1.0" @@ -1166,9 +1346,9 @@ dependencies = [ [[package]] name = "vhost-user-backend" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73768c8584e0be5ed8feb063785910cabe3f1af6661a5953fd3247fa611ddfaf" +checksum = "0b68f4bc879573f00724b9f1e0d0bbcd9fca151e9fc0bd81b157e48e9002d72a" dependencies = [ "libc", "log", @@ -1181,9 +1361,9 @@ dependencies = [ [[package]] name = "virtio-bindings" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68d0df4f5ad79b1dc81b5913ac737e24a84dcd5100f36ed953a1faec18aba241" +checksum = "1711e61c00f8cb450bd15368152a1e37a12ef195008ddc7d0f4812f9e2b30a68" [[package]] name = "virtio-queue" @@ -1221,6 +1401,15 @@ dependencies = [ "libc", ] +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + 
[[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -1415,6 +1604,16 @@ dependencies = [ "memchr", ] +[[package]] +name = "zerocopy" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "854e949ac82d619ee9a14c66a1b674ac730422372ccb759ce0c39cabcf2bf8e6" +dependencies = [ + "byteorder", + "zerocopy-derive 0.6.6", +] + [[package]] name = "zerocopy" version = "0.7.35" @@ -1422,7 +1621,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", - "zerocopy-derive", + "zerocopy-derive 0.7.35", +] + +[[package]] +name = "zerocopy-derive" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "125139de3f6b9d625c39e2efdd73d41bdac468ccd556556440e322be0e1bbd91" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] diff --git a/staging/Cargo.toml b/staging/Cargo.toml index 88d416856..1a970b77e 100644 --- a/staging/Cargo.toml +++ b/staging/Cargo.toml @@ -1,6 +1,7 @@ [workspace] resolver = "2" members = [ + "vhost-device-gpu", "vhost-device-video", "vhost-device-can", "vhost-device-console", diff --git a/staging/coverage_config_x86_64.json b/staging/coverage_config_x86_64.json index c1371fdb9..fbda5c566 100644 --- a/staging/coverage_config_x86_64.json +++ b/staging/coverage_config_x86_64.json @@ -1,5 +1,5 @@ { - "coverage_score": 80.31, + "coverage_score": 80.64, "exclude_path": "", "crate_features": "" } diff --git a/staging/vhost-device-gpu/CHANGELOG.md b/staging/vhost-device-gpu/CHANGELOG.md new file mode 100644 index 000000000..8f67e1ba9 --- /dev/null +++ b/staging/vhost-device-gpu/CHANGELOG.md @@ -0,0 +1,10 @@ +# Changelog +## Unreleased + +### Added + +### Changed + +### Fixed + +### Deprecated \ No newline at end of file diff --git a/staging/vhost-device-gpu/Cargo.toml b/staging/vhost-device-gpu/Cargo.toml new file mode 100644 
index 000000000..1ffadfcdd --- /dev/null +++ b/staging/vhost-device-gpu/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "vhost-device-gpu" +version = "0.1.0" +authors = ["Dorinda Bassey ", "Matej Hrica "] +description = "A virtio-gpu device using the vhost-user protocol." +repository = "https://github.com/rust-vmm/vhost-device" +readme = "README.md" +keywords = ["gpu", "vhost", "vhost-user", "virtio"] +categories = ["multimedia::video", "virtualization"] +license = "Apache-2.0 OR BSD-3-Clause" +edition = "2021" +publish = false + +[features] +xen = ["vm-memory/xen", "vhost/xen", "vhost-user-backend/xen"] + +[dependencies] +clap = { version = "4.4", features = ["derive"] } +env_logger = "0.11.5" +libc = "0.2" +log = "0.4" + +[target.'cfg(not(target_env = "musl"))'.dependencies] +rutabaga_gfx = { version = "0.1.4", features = ["gfxstream", "virgl_renderer"] } +thiserror = "1.0" +vhost = { version = "0.12.1", features = ["vhost-user-backend"] } +vhost-user-backend = "0.16.1" +virtio-bindings = "0.2.2" +virtio-queue = "0.13.0" +vm-memory = "0.15.0" +vmm-sys-util = "0.12.1" +zerocopy = "0.6.3" + +[dev-dependencies] +assert_matches = "1.5" +tempfile = "3.13" +virtio-queue = { version = "0.13", features = ["test-utils"] } +vm-memory = { version = "0.15.0", features = ["backend-mmap", "backend-atomic"] } +mockall = "0.13.0" +rusty-fork = "0.3.0" \ No newline at end of file diff --git a/staging/vhost-device-gpu/LICENSE-APACHE b/staging/vhost-device-gpu/LICENSE-APACHE new file mode 100644 index 000000000..1cd601d0a --- /dev/null +++ b/staging/vhost-device-gpu/LICENSE-APACHE @@ -0,0 +1 @@ +../../LICENSE-APACHE \ No newline at end of file diff --git a/staging/vhost-device-gpu/LICENSE-BSD-3-Clause b/staging/vhost-device-gpu/LICENSE-BSD-3-Clause new file mode 100644 index 000000000..a60f1af6d --- /dev/null +++ b/staging/vhost-device-gpu/LICENSE-BSD-3-Clause @@ -0,0 +1 @@ +../../LICENSE-BSD-3-Clause \ No newline at end of file diff --git a/staging/vhost-device-gpu/README.md 
b/staging/vhost-device-gpu/README.md new file mode 100644 index 000000000..9878ad090 --- /dev/null +++ b/staging/vhost-device-gpu/README.md @@ -0,0 +1,101 @@ +# vhost-device-gpu - GPU emulation backend daemon + +## Synopsis +```shell +vhost-device-gpu --socket-path +``` + +## Description +A virtio-gpu device using the vhost-user protocol. + +## Options + +```text + -s, --socket-path + vhost-user Unix domain socket path + + -h, --help + Print help + + -V, --version + Print version +``` + +## Limitations + +We are currently only supporting sharing the display output to QEMU through a +socket using the transfer_read operation triggered by +VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D to transfer data from and to virtio-gpu 3d +resources. It'll be nice to have support for directly sharing display output +resource using dmabuf. + +This device does not yet support the VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB, +VIRTIO_GPU_CMD_SET_SCANOUT_BLOB and VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID features. + +Currently this crate requires some necessary bits in order to move the crate out of staging: + +- Achieving a minimum of ~87% code coverage in the main vhost-device repository, + which requires some additional unit tests to increase code coverage. +- Addition of CLI arguments to specify the exact number of capsets and use + a default capset configuration when no capset is specified rather than using + hard-coded capset value. + +## Features + +The device leverages the [rutabaga_gfx](https://crates.io/crates/rutabaga_gfx) crate +to provide virglrenderer and gfxstream rendering. With Virglrenderer, Rutabaga +translates OpenGL API and Vulkan calls to an intermediate representation and allows +for OpenGL acceleration on the host. With the gfxstream rendering mode, GLES and +Vulkan calls are forwarded to the host with minimal modification. 
+ +## Examples + +First start the daemon on the host machine using either of the 2 gpu modes: + +1) virgl-renderer +2) gfxstream + +```shell +host# vhost-device-gpu --socket-path /tmp/gpu.socket --gpu-mode virgl-renderer +``` + +With QEMU, there are two device front-ends you can use with this device. +You can either use `vhost-user-gpu-pci` or `vhost-user-vga`, which also +implements VGA, that allows you to see boot messages before the guest +initializes the GPU. You can also use different display outputs (for example +`gtk` or `dbus`). +By default, QEMU also adds another VGA output, use `-vga none` to make +sure it is disabled. + +1) Using `vhost-user-gpu-pci` + +Start QEMU with the following flags: + +```text +-chardev socket,id=vgpu,path=/tmp/gpu.socket \ +-device vhost-user-gpu-pci,chardev=vgpu,id=vgpu \ +-object memory-backend-memfd,share=on,id=mem0,size=4G, \ +-machine q35,memory-backend=mem0,accel=kvm \ +-display gtk,gl=on,show-cursor=on \ +-vga none +``` + +2) Using `vhost-user-vga` + +Start QEMU with the following flags: + +```text +-chardev socket,id=vgpu,path=/tmp/gpu.socket \ +-device vhost-user-vga,chardev=vgpu,id=vgpu \ +-object memory-backend-memfd,share=on,id=mem0,size=4G, \ +-machine q35,memory-backend=mem0,accel=kvm \ +-display gtk,gl=on,show-cursor=on \ +-vga none +``` + +## License + +This project is licensed under either of + +- [Apache License](http://www.apache.org/licenses/LICENSE-2.0), Version 2.0 +- [BSD-3-Clause License](https://opensource.org/licenses/BSD-3-Clause) diff --git a/staging/vhost-device-gpu/rustfmt.toml b/staging/vhost-device-gpu/rustfmt.toml new file mode 100644 index 000000000..c6f0942d7 --- /dev/null +++ b/staging/vhost-device-gpu/rustfmt.toml @@ -0,0 +1,7 @@ +edition = "2018" +format_generated_files = false +format_code_in_doc_comments = true +format_strings = true +imports_granularity = "Crate" +group_imports = "StdExternalCrate" +wrap_comments = true diff --git a/staging/vhost-device-gpu/src/device.rs 
b/staging/vhost-device-gpu/src/device.rs new file mode 100644 index 000000000..fe0e74938 --- /dev/null +++ b/staging/vhost-device-gpu/src/device.rs @@ -0,0 +1,1607 @@ +// vhost device Gpu +// +// Copyright 2024 RedHat +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +use std::{ + cell::RefCell, + io::{self, ErrorKind, Result as IoResult}, + os::fd::AsRawFd, + sync::{self, Arc, Mutex}, +}; + +use log::{debug, error, trace, warn}; +use rutabaga_gfx::{ + ResourceCreate3D, RutabagaFence, Transfer3D, RUTABAGA_PIPE_BIND_RENDER_TARGET, + RUTABAGA_PIPE_TEXTURE_2D, +}; +use thiserror::Error as ThisError; +use vhost::vhost_user::{ + gpu_message::{VhostUserGpuCursorPos, VhostUserGpuEdidRequest}, + message::{VhostUserProtocolFeatures, VhostUserVirtioFeatures}, + GpuBackend, +}; +use vhost_user_backend::{VhostUserBackend, VringEpollHandler, VringRwLock, VringT}; +use virtio_bindings::{ + bindings::{ + virtio_config::{VIRTIO_F_NOTIFY_ON_EMPTY, VIRTIO_F_RING_RESET, VIRTIO_F_VERSION_1}, + virtio_ring::{VIRTIO_RING_F_EVENT_IDX, VIRTIO_RING_F_INDIRECT_DESC}, + }, + virtio_gpu::{ + VIRTIO_GPU_F_CONTEXT_INIT, VIRTIO_GPU_F_EDID, VIRTIO_GPU_F_RESOURCE_BLOB, + VIRTIO_GPU_F_VIRGL, + }, +}; +use virtio_queue::{QueueOwnedT, Reader, Writer}; +use vm_memory::{ByteValued, GuestAddressSpace, GuestMemoryAtomic, GuestMemoryMmap, Le32}; +use vmm_sys_util::{ + epoll::EventSet, + eventfd::{EventFd, EFD_NONBLOCK}, +}; + +use crate::{ + protocol::{ + virtio_gpu_ctrl_hdr, virtio_gpu_ctx_create, virtio_gpu_get_edid, + virtio_gpu_resource_create_2d, virtio_gpu_resource_create_3d, virtio_gpu_transfer_host_3d, + virtio_gpu_transfer_to_host_2d, virtio_gpu_update_cursor, GpuCommand, + GpuCommandDecodeError, GpuResponse::ErrUnspec, GpuResponseEncodeError, VirtioGpuConfig, + VirtioGpuResult, CONTROL_QUEUE, CURSOR_QUEUE, NUM_QUEUES, POLL_EVENT, QUEUE_SIZE, + VIRTIO_GPU_FLAG_FENCE, VIRTIO_GPU_FLAG_INFO_RING_IDX, VIRTIO_GPU_MAX_SCANOUTS, + }, + virtio_gpu::{RutabagaVirtioGpu, VirtioGpu, 
VirtioGpuRing}, + GpuConfig, GpuMode, +}; + +type Result = std::result::Result; + +#[derive(Debug, ThisError)] +pub enum Error { + #[error("Failed to handle event, didn't match EPOLLIN")] + HandleEventNotEpollIn, + #[error("Failed to handle unknown event")] + HandleEventUnknown, + #[error("Descriptor read failed")] + DescriptorReadFailed, + #[error("Descriptor write failed")] + DescriptorWriteFailed, + #[error("Invalid command type {0}")] + InvalidCommandType(u32), + #[error("Failed to send used queue notification: {0}")] + NotificationFailed(io::Error), + #[error("Failed to create new EventFd")] + EventFdFailed, + #[error("Failed to create an iterator over a descriptor chain: {0}")] + CreateIteratorDescChain(virtio_queue::Error), + #[error("Failed to create descriptor chain Reader: {0}")] + CreateReader(virtio_queue::Error), + #[error("Failed to create descriptor chain Writer: {0}")] + CreateWriter(virtio_queue::Error), + #[error("Failed to decode gpu command: {0}")] + GpuCommandDecode(GpuCommandDecodeError), + #[error("Failed to encode gpu response: {0}")] + GpuResponseEncode(GpuResponseEncodeError), + #[error("Failed add used chain to queue: {0}")] + QueueAddUsed(virtio_queue::Error), + #[error("Epoll handler not available: {0}")] + EpollHandler(String), + #[error("Failed register epoll listener: {0}")] + RegisterEpollListener(io::Error), +} + +impl From for io::Error { + fn from(e: Error) -> Self { + Self::new(io::ErrorKind::Other, e) + } +} + +struct VhostUserGpuBackendInner { + virtio_cfg: VirtioGpuConfig, + event_idx_enabled: bool, + gpu_backend: Option, + pub exit_event: EventFd, + mem: Option>, + gpu_mode: GpuMode, +} + +pub struct VhostUserGpuBackend { + inner: Mutex, + // this uses sync::Weak to avoid a reference cycle + epoll_handler: Mutex>>>, + poll_event_fd: Mutex>, +} + +impl VhostUserGpuBackend { + pub fn new(gpu_config: &GpuConfig) -> Result> { + log::trace!("VhostUserGpuBackend::new(config = {:?})", &gpu_config); + let inner = 
VhostUserGpuBackendInner { + virtio_cfg: VirtioGpuConfig { + events_read: 0.into(), + events_clear: 0.into(), + num_scanouts: Le32::from(VIRTIO_GPU_MAX_SCANOUTS), + num_capsets: RutabagaVirtioGpu::MAX_NUMBER_OF_CAPSETS.into(), + }, + event_idx_enabled: false, + gpu_backend: None, + exit_event: EventFd::new(EFD_NONBLOCK).map_err(|_| Error::EventFdFailed)?, + mem: None, + gpu_mode: gpu_config.gpu_mode(), + }; + + Ok(Arc::new(Self { + inner: Mutex::new(inner), + epoll_handler: Mutex::new(sync::Weak::new()), + poll_event_fd: Mutex::new(None), + })) + } + + pub fn set_epoll_handler(&self, epoll_handlers: &[Arc>>]) { + // We only expect 1 thread to which we want to register all handlers + assert_eq!(epoll_handlers.len(), 1); + let mut handler = match self.epoll_handler.lock() { + Ok(h) => h, + Err(poisoned) => poisoned.into_inner(), + }; + *handler = Arc::downgrade(&epoll_handlers[0]); + } +} + +impl VhostUserGpuBackendInner { + fn process_gpu_command( + virtio_gpu: &mut impl VirtioGpu, + mem: &GuestMemoryMmap, + hdr: virtio_gpu_ctrl_hdr, + cmd: GpuCommand, + ) -> VirtioGpuResult { + virtio_gpu.force_ctx_0(); + debug!("process_gpu_command: {cmd:?}"); + match cmd { + GpuCommand::GetDisplayInfo => virtio_gpu.display_info(), + GpuCommand::GetEdid(req) => Self::handle_get_edid(virtio_gpu, req), + GpuCommand::ResourceCreate2d(req) => Self::handle_resource_create_2d(virtio_gpu, req), + GpuCommand::ResourceUnref(req) => virtio_gpu.unref_resource(req.resource_id), + GpuCommand::SetScanout(req) => { + virtio_gpu.set_scanout(req.scanout_id, req.resource_id, req.r.into()) + } + GpuCommand::ResourceFlush(req) => { + virtio_gpu.flush_resource(req.resource_id, req.r.into()) + } + GpuCommand::TransferToHost2d(req) => Self::handle_transfer_to_host_2d(virtio_gpu, req), + GpuCommand::ResourceAttachBacking(req, iovecs) => { + virtio_gpu.attach_backing(req.resource_id, mem, iovecs) + } + GpuCommand::ResourceDetachBacking(req) => virtio_gpu.detach_backing(req.resource_id), + 
GpuCommand::UpdateCursor(req) => Self::handle_update_cursor(virtio_gpu, req), + GpuCommand::MoveCursor(req) => Self::handle_move_cursor(virtio_gpu, req), + GpuCommand::ResourceAssignUuid(_) => { + panic!("virtio_gpu: GpuCommand::ResourceAssignUuid unimplemented") + } + GpuCommand::GetCapsetInfo(req) => virtio_gpu.get_capset_info(req.capset_index), + GpuCommand::GetCapset(req) => virtio_gpu.get_capset(req.capset_id, req.capset_version), + GpuCommand::CtxCreate(req) => Self::handle_ctx_create(virtio_gpu, hdr, req), + GpuCommand::CtxDestroy(_) => virtio_gpu.destroy_context(hdr.ctx_id), + GpuCommand::CtxAttachResource(req) => { + virtio_gpu.context_attach_resource(hdr.ctx_id, req.resource_id) + } + GpuCommand::CtxDetachResource(req) => { + virtio_gpu.context_detach_resource(hdr.ctx_id, req.resource_id) + } + GpuCommand::ResourceCreate3d(req) => Self::handle_resource_create_3d(virtio_gpu, req), + GpuCommand::TransferToHost3d(req) => { + Self::handle_transfer_to_host_3d(virtio_gpu, hdr.ctx_id, req) + } + GpuCommand::TransferFromHost3d(req) => { + Self::handle_transfer_from_host_3d(virtio_gpu, hdr.ctx_id, req) + } + GpuCommand::CmdSubmit3d { + fence_ids, + mut cmd_data, + } => virtio_gpu.submit_command(hdr.ctx_id, &mut cmd_data, &fence_ids), + GpuCommand::ResourceCreateBlob(_) => { + panic!("virtio_gpu: GpuCommand::ResourceCreateBlob unimplemented") + } + GpuCommand::SetScanoutBlob(_) => { + panic!("virtio_gpu: GpuCommand::SetScanoutBlob unimplemented") + } + GpuCommand::ResourceMapBlob(_) => { + panic!("virtio_gpu: GpuCommand::ResourceMapBlob unimplemented") + } + GpuCommand::ResourceUnmapBlob(_) => { + panic!("virtio_gpu: GpuCommand::ResourceUnmapBlob unimplemented") + } + } + } + + fn handle_get_edid(virtio_gpu: &impl VirtioGpu, req: virtio_gpu_get_edid) -> VirtioGpuResult { + let edid_req = VhostUserGpuEdidRequest { + scanout_id: req.scanout, + }; + virtio_gpu.get_edid(edid_req) + } + + fn handle_resource_create_2d( + virtio_gpu: &mut impl VirtioGpu, + req: 
virtio_gpu_resource_create_2d, + ) -> VirtioGpuResult { + let resource_create_3d = ResourceCreate3D { + target: RUTABAGA_PIPE_TEXTURE_2D, + format: req.format, + bind: RUTABAGA_PIPE_BIND_RENDER_TARGET, + width: req.width, + height: req.height, + depth: 1, + array_size: 1, + last_level: 0, + nr_samples: 0, + flags: 0, + }; + virtio_gpu.resource_create_3d(req.resource_id, resource_create_3d) + } + + fn handle_transfer_to_host_2d( + virtio_gpu: &mut impl VirtioGpu, + req: virtio_gpu_transfer_to_host_2d, + ) -> VirtioGpuResult { + let transfer = Transfer3D::new_2d(req.r.x, req.r.y, req.r.width, req.r.height, req.offset); + virtio_gpu.transfer_write(0, req.resource_id, transfer) + } + + fn handle_update_cursor( + virtio_gpu: &mut impl VirtioGpu, + req: virtio_gpu_update_cursor, + ) -> VirtioGpuResult { + let cursor_pos = VhostUserGpuCursorPos { + scanout_id: req.pos.scanout_id, + x: req.pos.x, + y: req.pos.y, + }; + virtio_gpu.update_cursor(req.resource_id, cursor_pos, req.hot_x, req.hot_y) + } + + fn handle_move_cursor( + virtio_gpu: &mut impl VirtioGpu, + req: virtio_gpu_update_cursor, + ) -> VirtioGpuResult { + let cursor = VhostUserGpuCursorPos { + scanout_id: req.pos.scanout_id, + x: req.pos.x, + y: req.pos.y, + }; + virtio_gpu.move_cursor(req.resource_id, cursor) + } + + fn handle_ctx_create( + virtio_gpu: &mut impl VirtioGpu, + hdr: virtio_gpu_ctrl_hdr, + req: virtio_gpu_ctx_create, + ) -> VirtioGpuResult { + let context_name: Option = String::from_utf8(req.debug_name.to_vec()).ok(); + virtio_gpu.create_context(hdr.ctx_id, req.context_init, context_name.as_deref()) + } + + fn handle_resource_create_3d( + virtio_gpu: &mut impl VirtioGpu, + req: virtio_gpu_resource_create_3d, + ) -> VirtioGpuResult { + let resource_create_3d = ResourceCreate3D { + target: req.target, + format: req.format, + bind: req.bind, + width: req.width, + height: req.height, + depth: req.depth, + array_size: req.array_size, + last_level: req.last_level, + nr_samples: req.nr_samples, + flags: 
req.flags, + }; + virtio_gpu.resource_create_3d(req.resource_id, resource_create_3d) + } + + fn handle_transfer_to_host_3d( + virtio_gpu: &mut impl VirtioGpu, + ctx_id: u32, + req: virtio_gpu_transfer_host_3d, + ) -> VirtioGpuResult { + let transfer = Transfer3D { + x: req.box_.x, + y: req.box_.y, + z: req.box_.z, + w: req.box_.w, + h: req.box_.h, + d: req.box_.d, + level: req.level, + stride: req.stride, + layer_stride: req.layer_stride, + offset: req.offset, + }; + virtio_gpu.transfer_write(ctx_id, req.resource_id, transfer) + } + + fn handle_transfer_from_host_3d( + virtio_gpu: &mut impl VirtioGpu, + ctx_id: u32, + req: virtio_gpu_transfer_host_3d, + ) -> VirtioGpuResult { + let transfer = Transfer3D { + x: req.box_.x, + y: req.box_.y, + z: req.box_.z, + w: req.box_.w, + h: req.box_.h, + d: req.box_.d, + level: req.level, + stride: req.stride, + layer_stride: req.layer_stride, + offset: req.offset, + }; + virtio_gpu.transfer_read(ctx_id, req.resource_id, transfer, None) + } + + fn process_queue_chain( + &self, + virtio_gpu: &mut impl VirtioGpu, + vring: &VringRwLock, + head_index: u16, + reader: &mut Reader, + writer: &mut Writer, + signal_used_queue: &mut bool, + ) -> Result<()> { + let mut response = ErrUnspec; + let mem = self.mem.as_ref().unwrap().memory().into_inner(); + + let ctrl_hdr = match GpuCommand::decode(reader) { + Ok((ctrl_hdr, gpu_cmd)) => { + let cmd_name = gpu_cmd.command_name(); + let response_result = + Self::process_gpu_command(virtio_gpu, &mem, ctrl_hdr, gpu_cmd); + // Unwrap the response from inside Result and log information + response = match response_result { + Ok(response) => response, + Err(response) => { + debug!("GpuCommand {cmd_name} failed: {response:?}"); + response + } + }; + Some(ctrl_hdr) + } + Err(e) => { + warn!("Failed to decode GpuCommand: {e}"); + None + } + }; + + if writer.available_bytes() == 0 { + debug!("Command does not have descriptors for a response"); + vring.add_used(head_index, 0).map_err(Error::QueueAddUsed)?; 
+ *signal_used_queue = true; + return Ok(()); + } + + let mut fence_id = 0; + let mut ctx_id = 0; + let mut flags = 0; + let mut ring_idx = 0; + + if let Some(ctrl_hdr) = ctrl_hdr { + if ctrl_hdr.flags & VIRTIO_GPU_FLAG_FENCE != 0 { + flags = ctrl_hdr.flags; + fence_id = ctrl_hdr.fence_id; + ctx_id = ctrl_hdr.ctx_id; + ring_idx = ctrl_hdr.ring_idx; + + let fence = RutabagaFence { + flags, + fence_id, + ctx_id, + ring_idx, + }; + if let Err(fence_response) = virtio_gpu.create_fence(fence) { + warn!( + "Failed to create fence: fence_id: {fence_id} fence_response: \ + {fence_response}" + ); + response = fence_response; + } + } + } + + // Prepare the response now, even if it is going to wait until + // fence is complete. + let response_len = response + .encode(flags, fence_id, ctx_id, ring_idx, writer) + .map_err(Error::GpuResponseEncode)?; + + let add_to_queue = if flags & VIRTIO_GPU_FLAG_FENCE != 0 { + let ring = match flags & VIRTIO_GPU_FLAG_INFO_RING_IDX { + 0 => VirtioGpuRing::Global, + _ => VirtioGpuRing::ContextSpecific { ctx_id, ring_idx }, + }; + debug!("Trying to process_fence for the command"); + virtio_gpu.process_fence(ring, fence_id, head_index, response_len) + } else { + true + }; + + if add_to_queue { + vring + .add_used(head_index, response_len) + .map_err(Error::QueueAddUsed)?; + trace!("add_used {}bytes", response_len); + *signal_used_queue = true; + } + Ok(()) + } + + /// Process the requests in the vring and dispatch replies + fn process_queue(&self, virtio_gpu: &mut impl VirtioGpu, vring: &VringRwLock) -> Result<()> { + let mem = self.mem.as_ref().unwrap().memory().into_inner(); + let desc_chains: Vec<_> = vring + .get_mut() + .get_queue_mut() + .iter(mem.clone()) + .map_err(Error::CreateIteratorDescChain)? 
+ .collect(); + + let mut signal_used_queue = false; + for desc_chain in desc_chains { + let head_index = desc_chain.head_index(); + let mut reader = desc_chain + .clone() + .reader(&mem) + .map_err(Error::CreateReader)?; + let mut writer = desc_chain.writer(&mem).map_err(Error::CreateWriter)?; + + self.process_queue_chain( + virtio_gpu, + vring, + head_index, + &mut reader, + &mut writer, + &mut signal_used_queue, + )?; + } + + if signal_used_queue { + debug!("Notifying used queue"); + vring + .signal_used_queue() + .map_err(Error::NotificationFailed)?; + } + debug!("Processing control queue finished"); + + Ok(()) + } + + fn handle_event( + &self, + device_event: u16, + virtio_gpu: &mut impl VirtioGpu, + vrings: &[VringRwLock], + ) -> IoResult<()> { + match device_event { + CONTROL_QUEUE | CURSOR_QUEUE => { + let vring = &vrings + .get(device_event as usize) + .ok_or_else(|| Error::HandleEventUnknown)?; + if self.event_idx_enabled { + // vm-virtio's Queue implementation only checks avail_index + // once, so to properly support EVENT_IDX we need to keep + // calling process_queue() until it stops finding new + // requests on the queue. + loop { + vring.disable_notification().unwrap(); + self.process_queue(virtio_gpu, vring)?; + if !vring.enable_notification().unwrap() { + break; + } + } + } else { + // Without EVENT_IDX, a single call is enough. + self.process_queue(virtio_gpu, vring)?; + } + } + POLL_EVENT => { + trace!("Handling POLL_EVENT"); + virtio_gpu.event_poll(); + } + _ => { + warn!("unhandled device_event: {}", device_event); + return Err(Error::HandleEventUnknown.into()); + } + } + + Ok(()) + } + + fn lazy_init_and_handle_event( + &mut self, + device_event: u16, + evset: EventSet, + vrings: &[VringRwLock], + _thread_id: usize, + ) -> IoResult> { + // We use thread_local here because it is the easiest way to handle VirtioGpu + // being !Send + thread_local! 
{ + static VIRTIO_GPU_REF: RefCell> = const { RefCell::new(None) }; + } + + debug!("Handle event called"); + if evset != EventSet::IN { + return Err(Error::HandleEventNotEpollIn.into()); + }; + + let mut event_poll_fd = None; + VIRTIO_GPU_REF.with_borrow_mut(|maybe_virtio_gpu| { + let virtio_gpu = match maybe_virtio_gpu { + Some(virtio_gpu) => virtio_gpu, + None => { + let gpu_backend = self.gpu_backend.take().ok_or_else(|| { + io::Error::new( + ErrorKind::Other, + "set_gpu_socket() not called, GpuBackend missing", + ) + })?; + + // We currently pass the CONTROL_QUEUE vring to RutabagaVirtioGpu, because we + // only expect to process fences for that queue. + let control_vring = &vrings[CONTROL_QUEUE as usize]; + + // VirtioGpu::new can be called once per process (otherwise it panics), + // so if somehow another thread accidentally wants to create another gpu here, + // it will panic anyway + let virtio_gpu = + RutabagaVirtioGpu::new(control_vring, self.gpu_mode, gpu_backend); + event_poll_fd = virtio_gpu.get_event_poll_fd(); + + maybe_virtio_gpu.insert(virtio_gpu) + } + }; + + self.handle_event(device_event, virtio_gpu, vrings) + })?; + + Ok(event_poll_fd) + } + + fn get_config(&self, offset: u32, size: u32) -> Vec { + let offset = offset as usize; + let size = size as usize; + + let buf = self.virtio_cfg.as_slice(); + + if offset + size > buf.len() { + return Vec::new(); + } + + buf[offset..offset + size].to_vec() + } +} + +/// `VhostUserBackend` trait methods +impl VhostUserBackend for VhostUserGpuBackend { + type Vring = VringRwLock; + type Bitmap = (); + + fn num_queues(&self) -> usize { + debug!("Num queues called"); + NUM_QUEUES + } + + fn max_queue_size(&self) -> usize { + debug!("Max queues called"); + QUEUE_SIZE + } + + fn features(&self) -> u64 { + 1 << VIRTIO_F_VERSION_1 + | 1 << VIRTIO_F_RING_RESET + | 1 << VIRTIO_F_NOTIFY_ON_EMPTY + | 1 << VIRTIO_RING_F_INDIRECT_DESC + | 1 << VIRTIO_RING_F_EVENT_IDX + | 1 << VIRTIO_GPU_F_VIRGL + | 1 << 
VIRTIO_GPU_F_EDID
            | 1 << VIRTIO_GPU_F_RESOURCE_BLOB
            | 1 << VIRTIO_GPU_F_CONTEXT_INIT
            | VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits()
    }

    fn protocol_features(&self) -> VhostUserProtocolFeatures {
        debug!("Protocol features called");
        VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::MQ
    }

    fn set_event_idx(&self, enabled: bool) {
        self.inner.lock().unwrap().event_idx_enabled = enabled;
        debug!("Event idx set to: {}", enabled);
    }

    fn update_memory(&self, mem: GuestMemoryAtomic<GuestMemoryMmap>) -> IoResult<()> {
        debug!("Update memory called");
        self.inner.lock().unwrap().mem = Some(mem);
        Ok(())
    }

    fn set_gpu_socket(&self, backend: GpuBackend) -> IoResult<()> {
        self.inner.lock().unwrap().gpu_backend = Some(backend);
        Ok(())
    }

    fn get_config(&self, offset: u32, size: u32) -> Vec<u8> {
        self.inner.lock().unwrap().get_config(offset, size)
    }

    fn exit_event(&self, _thread_index: usize) -> Option<EventFd> {
        self.inner.lock().unwrap().exit_event.try_clone().ok()
    }

    fn handle_event(
        &self,
        device_event: u16,
        evset: EventSet,
        vrings: &[Self::Vring],
        thread_id: usize,
    ) -> IoResult<()> {
        // Delegate to the inner backend; on the first call this also creates
        // the renderer and may hand back its event-poll fd.
        let poll_event_fd = self.inner.lock().unwrap().lazy_init_and_handle_event(
            device_event,
            evset,
            vrings,
            thread_id,
        )?;

        // If the renderer exposed a poll fd, register it with the epoll
        // handler so POLL_EVENT gets dispatched to this backend.
        if let Some(poll_event_fd) = poll_event_fd {
            let epoll_handler = match self.epoll_handler.lock() {
                Ok(h) => h,
                Err(poisoned) => poisoned.into_inner(),
            };
            let Some(epoll_handler) = epoll_handler.upgrade() else {
                return Err(
                    Error::EpollHandler("Failed to upgrade epoll handler".to_string()).into(),
                );
            };
            epoll_handler
                .register_listener(
                    poll_event_fd.as_raw_fd(),
                    EventSet::IN,
                    u64::from(POLL_EVENT),
                )
                .map_err(Error::RegisterEpollListener)?;
            debug!("Registered POLL_EVENT on fd: {}", poll_event_fd.as_raw_fd());
            // store the fd, so it is not closed after exiting this scope
            self.poll_event_fd.lock().unwrap().replace(poll_event_fd);
        }

        Ok(())
    }
}

#[cfg(test)]
mod
tests { + use std::{ + fs::File, + io::{ErrorKind, Read}, + iter::zip, + mem, + os::{fd::FromRawFd, unix::net::UnixStream}, + sync::Arc, + thread, + time::Duration, + }; + + use assert_matches::assert_matches; + use mockall::predicate; + use rusty_fork::rusty_fork_test; + use tempfile::tempdir; + use vhost::vhost_user::gpu_message::{VhostUserGpuScanout, VhostUserGpuUpdate}; + use vhost_user_backend::{VhostUserDaemon, VringRwLock, VringT}; + use virtio_bindings::virtio_ring::{VRING_DESC_F_NEXT, VRING_DESC_F_WRITE}; + use virtio_queue::{mock::MockSplitQueue, Descriptor, Queue, QueueT}; + use vm_memory::{ + ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap, + }; + + use super::*; + use crate::{ + protocol::{ + virtio_gpu_mem_entry, virtio_gpu_rect, virtio_gpu_resource_attach_backing, + virtio_gpu_resource_flush, virtio_gpu_set_scanout, + GpuResponse::{OkCapsetInfo, OkDisplayInfo, OkEdid, OkNoData}, + VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING, VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, + VIRTIO_GPU_CMD_RESOURCE_FLUSH, VIRTIO_GPU_CMD_SET_SCANOUT, + VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D, VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D, + VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, + VIRTIO_GPU_RESP_ERR_UNSPEC, VIRTIO_GPU_RESP_OK_NODATA, + }, + virtio_gpu::MockVirtioGpu, + }; + + const SOCKET_PATH: &str = "/tmp/vgpu.socket"; + const MEM_SIZE: usize = 2 * 1024 * 1024; // 2MiB + + const CURSOR_QUEUE_ADDR: GuestAddress = GuestAddress(0x0); + const CURSOR_QUEUE_DATA_ADDR: GuestAddress = GuestAddress(0x1_000); + const CURSOR_QUEUE_SIZE: u16 = 16; + const CONTROL_QUEUE_ADDR: GuestAddress = GuestAddress(0x2_000); + const CONTROL_QUEUE_DATA_ADDR: GuestAddress = GuestAddress(0x10_000); + const CONTROL_QUEUE_SIZE: u16 = 1024; + + fn init() -> (Arc, GuestMemoryAtomic) { + let test_dir = tempdir().expect("Could not create a temp test directory."); + let socket_path = test_dir.path().join(SOCKET_PATH); + let backend = + 
VhostUserGpuBackend::new(&GpuConfig::new(socket_path, GpuMode::VirglRenderer)).unwrap(); + let mem = GuestMemoryAtomic::new( + GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), MEM_SIZE)]).unwrap(), + ); + + backend.update_memory(mem.clone()).unwrap(); + (backend, mem) + } + + /// Arguments to create a descriptor chain for testing + struct TestingDescChainArgs<'a> { + readable_desc_bufs: &'a [&'a [u8]], + writable_desc_lengths: &'a [u32], + } + + fn gpu_backend_pair() -> (UnixStream, GpuBackend) { + let (frontend, backend) = UnixStream::pair().unwrap(); + let backend = GpuBackend::from_stream(backend); + + (frontend, backend) + } + + fn event_fd_into_file(event_fd: EventFd) -> File { + // SAFETY: We ensure that the `event_fd` is properly handled such that its file + // descriptor is not closed after `File` takes ownership of it. + unsafe { + let event_fd_raw = event_fd.as_raw_fd(); + mem::forget(event_fd); + File::from_raw_fd(event_fd_raw) + } + } + + #[test] + fn test_process_gpu_command() { + let (_, mem) = init(); + let hdr = virtio_gpu_ctrl_hdr::default(); + + let test_cmd = |cmd: GpuCommand, setup: fn(&mut MockVirtioGpu)| { + let mut mock_gpu = MockVirtioGpu::new(); + mock_gpu.expect_force_ctx_0().return_once(|| ()); + setup(&mut mock_gpu); + VhostUserGpuBackendInner::process_gpu_command(&mut mock_gpu, &mem.memory(), hdr, cmd) + }; + + let cmd = GpuCommand::GetDisplayInfo; + let result = test_cmd(cmd, |g| { + g.expect_display_info() + .return_once(|| Ok(OkDisplayInfo(vec![(1280, 720, true)]))); + }); + assert_matches!(result, Ok(OkDisplayInfo(_))); + + let cmd = GpuCommand::GetEdid(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_get_edid().return_once(|_| { + Ok(OkEdid { + blob: Box::new([0xff; 512]), + }) + }); + }); + assert_matches!(result, Ok(OkEdid { .. 
})); + + let cmd = GpuCommand::ResourceCreate2d(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_resource_create_3d() + .return_once(|_, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::ResourceUnref(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_unref_resource().return_once(|_| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::SetScanout(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_set_scanout().return_once(|_, _, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::ResourceFlush(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_flush_resource().return_once(|_, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::TransferToHost2d(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_transfer_write() + .return_once(|_, _, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::ResourceAttachBacking(Default::default(), Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_attach_backing() + .return_once(|_, _, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::ResourceDetachBacking(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_detach_backing().return_once(|_| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::GetCapsetInfo(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_get_capset_info().return_once(|_| { + Ok(OkCapsetInfo { + capset_id: 1, + version: 2, + size: 32, + }) + }); + }); + assert_matches!( + result, + Ok(OkCapsetInfo { + capset_id: 1, + version: 2, + size: 32 + }) + ); + + let cmd = GpuCommand::CtxCreate(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_create_context() + .return_once(|_, _, _| Ok(OkNoData)); + }); + 
assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::CtxDestroy(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_destroy_context().return_once(|_| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::CtxAttachResource(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_context_attach_resource() + .return_once(|_, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::CtxDetachResource(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_context_detach_resource() + .return_once(|_, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::ResourceCreate3d(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_resource_create_3d() + .return_once(|_, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::TransferToHost3d(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_transfer_write() + .return_once(|_, _, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::TransferFromHost3d(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_transfer_read() + .return_once(|_, _, _, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::CmdSubmit3d { + cmd_data: vec![0xff; 512], + fence_ids: vec![], + }; + let result = test_cmd(cmd, |g| { + g.expect_submit_command() + .return_once(|_, _, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::UpdateCursor(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_update_cursor() + .return_once(|_, _, _, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::MoveCursor(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_move_cursor().return_once(|_, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = 
GpuCommand::MoveCursor(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_move_cursor().return_once(|_, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + } + + fn make_descriptors_into_a_chain(start_idx: u16, descriptors: &mut [Descriptor]) { + let last_idx = start_idx + descriptors.len() as u16 - 1; + for (idx, desc) in zip(start_idx.., descriptors.iter_mut()) { + if idx == last_idx { + desc.set_flags(desc.flags() & !VRING_DESC_F_NEXT as u16); + } else { + desc.set_flags(desc.flags() | VRING_DESC_F_NEXT as u16); + desc.set_next(idx + 1); + }; + } + } + + // Creates a vring from the specified descriptor chains + // For each created device-writable descriptor chain a Vec<(GuestAddress, + // usize)> is returned representing the descriptors of that chain. + fn create_vring( + mem: &GuestMemoryAtomic, + chains: &[TestingDescChainArgs], + queue_addr_start: GuestAddress, + data_addr_start: GuestAddress, + queue_size: u16, + ) -> (VringRwLock, Vec>, EventFd) { + let mem_handle = mem.memory(); + mem.memory() + .check_address(queue_addr_start) + .expect("Invalid start adress"); + + let mut output_bufs = Vec::new(); + let vq = MockSplitQueue::create(&*mem_handle, queue_addr_start, queue_size); + // Address of the buffer associated with the descriptor + let mut next_addr = data_addr_start.0; + let mut chain_index_start = 0; + let mut descriptors = Vec::new(); + + for chain in chains { + for buf in chain.readable_desc_bufs { + mem.memory() + .check_address(GuestAddress(next_addr)) + .expect("Readable descriptor's buffer address is not valid!"); + let desc = Descriptor::new( + next_addr, + buf.len() + .try_into() + .expect("Buffer too large to fit into descriptor"), + 0, + 0, + ); + mem_handle.write(buf, desc.addr()).unwrap(); + descriptors.push(desc); + next_addr += buf.len() as u64; + } + let mut writable_descriptor_adresses = Vec::new(); + for desc_len in chain.writable_desc_lengths.iter().copied() { + mem.memory() + 
.check_address(GuestAddress(next_addr)) + .expect("Writable descriptor's buffer address is not valid!"); + let desc = Descriptor::new(next_addr, desc_len, VRING_DESC_F_WRITE as u16, 0); + writable_descriptor_adresses.push(desc.addr()); + descriptors.push(desc); + next_addr += u64::from(desc_len); + } + output_bufs.push(writable_descriptor_adresses); + make_descriptors_into_a_chain( + chain_index_start as u16, + &mut descriptors[chain_index_start..], + ); + chain_index_start = descriptors.len(); + } + + assert!(descriptors.len() < queue_size as usize); + if !descriptors.is_empty() { + vq.build_multiple_desc_chains(&descriptors) + .expect("Failed to build descriptor chain"); + } + + let queue: Queue = vq.create_queue().unwrap(); + let vring = VringRwLock::new(mem.clone(), queue_size).unwrap(); + let signal_used_queue_evt = EventFd::new(EFD_NONBLOCK).unwrap(); + let signal_used_queue_evt_clone = signal_used_queue_evt.try_clone().unwrap(); + vring + .set_queue_info(queue.desc_table(), queue.avail_ring(), queue.used_ring()) + .unwrap(); + vring.set_call(Some(event_fd_into_file(signal_used_queue_evt_clone))); + + vring.set_enabled(true); + vring.set_queue_ready(true); + + (vring, output_bufs, signal_used_queue_evt) + } + + fn create_control_vring( + mem: &GuestMemoryAtomic, + chains: &[TestingDescChainArgs], + ) -> (VringRwLock, Vec>, EventFd) { + create_vring( + mem, + chains, + CONTROL_QUEUE_ADDR, + CONTROL_QUEUE_DATA_ADDR, + CONTROL_QUEUE_SIZE, + ) + } + + fn create_cursor_vring( + mem: &GuestMemoryAtomic, + chains: &[TestingDescChainArgs], + ) -> (VringRwLock, Vec>, EventFd) { + create_vring( + mem, + chains, + CURSOR_QUEUE_ADDR, + CURSOR_QUEUE_DATA_ADDR, + CURSOR_QUEUE_SIZE, + ) + } + + #[test] + fn test_handle_event_executes_gpu_commands() { + let (backend, mem) = init(); + backend.update_memory(mem.clone()).unwrap(); + let backend_inner = backend.inner.lock().unwrap(); + + let hdr = virtio_gpu_ctrl_hdr { + type_: VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, + 
..Default::default() + }; + + let cmd = virtio_gpu_resource_create_2d { + resource_id: 1, + format: VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, + width: 1920, + height: 1080, + }; + + let chain1 = TestingDescChainArgs { + readable_desc_bufs: &[hdr.as_slice(), cmd.as_slice()], + writable_desc_lengths: &[mem::size_of::() as u32], + }; + + let chain2 = TestingDescChainArgs { + readable_desc_bufs: &[hdr.as_slice(), cmd.as_slice()], + writable_desc_lengths: &[mem::size_of::() as u32], + }; + + let (control_vring, outputs, control_signal_used_queue_evt) = + create_control_vring(&mem, &[chain1, chain2]); + let (cursor_vring, _, cursor_signal_used_queue_evt) = create_cursor_vring(&mem, &[]); + + let mem = mem.memory().into_inner(); + + let mut mock_gpu = MockVirtioGpu::new(); + let seq = &mut mockall::Sequence::new(); + + mock_gpu + .expect_force_ctx_0() + .return_const(()) + .once() + .in_sequence(seq); + + mock_gpu + .expect_resource_create_3d() + .with(predicate::eq(1), predicate::always()) + .returning(|_, _| Ok(OkNoData)) + .once() + .in_sequence(seq); + + mock_gpu + .expect_force_ctx_0() + .return_const(()) + .once() + .in_sequence(seq); + + mock_gpu + .expect_resource_create_3d() + .with(predicate::eq(1), predicate::always()) + .returning(|_, _| Err(ErrUnspec)) + .once() + .in_sequence(seq); + + assert_eq!( + cursor_signal_used_queue_evt.read().unwrap_err().kind(), + ErrorKind::WouldBlock + ); + + backend_inner + .handle_event(0, &mut mock_gpu, &[control_vring, cursor_vring]) + .unwrap(); + + let expected_hdr1 = virtio_gpu_ctrl_hdr { + type_: VIRTIO_GPU_RESP_OK_NODATA, + ..Default::default() + }; + + let expected_hdr2 = virtio_gpu_ctrl_hdr { + type_: VIRTIO_GPU_RESP_ERR_UNSPEC, + ..Default::default() + }; + control_signal_used_queue_evt + .read() + .expect("Expected device to signal used queue!"); + assert_eq!( + cursor_signal_used_queue_evt.read().unwrap_err().kind(), + ErrorKind::WouldBlock, + "Unexpected signal_used_queue on cursor queue!" 
+ ); + + let result_hdr1: virtio_gpu_ctrl_hdr = mem.memory().read_obj(outputs[0][0]).unwrap(); + assert_eq!(result_hdr1, expected_hdr1); + + let result_hdr2: virtio_gpu_ctrl_hdr = mem.memory().read_obj(outputs[1][0]).unwrap(); + assert_eq!(result_hdr2, expected_hdr2); + } + + #[test] + fn test_command_with_fence_ready_immediately() { + let (backend, mem) = init(); + backend.update_memory(mem.clone()).unwrap(); + let backend_inner = backend.inner.lock().unwrap(); + + const FENCE_ID: u64 = 123; + + let hdr = virtio_gpu_ctrl_hdr { + type_: VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, + flags: VIRTIO_GPU_FLAG_FENCE, + fence_id: FENCE_ID, + ctx_id: 0, + ring_idx: 0, + padding: Default::default(), + }; + + let cmd = virtio_gpu_transfer_host_3d::default(); + + let chain = TestingDescChainArgs { + readable_desc_bufs: &[hdr.as_slice(), cmd.as_slice()], + writable_desc_lengths: &[mem::size_of::() as u32], + }; + + let (control_vring, outputs, control_signal_used_queue_evt) = + create_control_vring(&mem, &[chain]); + let (cursor_vring, _, _) = create_cursor_vring(&mem, &[]); + + let mut mock_gpu = MockVirtioGpu::new(); + let seq = &mut mockall::Sequence::new(); + + mock_gpu + .expect_force_ctx_0() + .return_const(()) + .once() + .in_sequence(seq); + + mock_gpu + .expect_transfer_write() + .returning(|_, _, _| Ok(OkNoData)) + .once() + .in_sequence(seq); + + mock_gpu + .expect_create_fence() + .withf(|fence| fence.fence_id == FENCE_ID) + .returning(|_| Ok(OkNoData)) + .once() + .in_sequence(seq); + + mock_gpu + .expect_process_fence() + .with( + predicate::eq(VirtioGpuRing::Global), + predicate::eq(FENCE_ID), + predicate::eq(0), + predicate::eq(mem::size_of_val(&hdr) as u32), + ) + .return_const(true) + .once() + .in_sequence(seq); + + backend_inner + .handle_event(0, &mut mock_gpu, &[control_vring, cursor_vring]) + .unwrap(); + + let expected_hdr = virtio_gpu_ctrl_hdr { + type_: VIRTIO_GPU_RESP_OK_NODATA, + flags: VIRTIO_GPU_FLAG_FENCE, + fence_id: FENCE_ID, + ctx_id: 0, + ring_idx: 
0, + padding: Default::default(), + }; + + control_signal_used_queue_evt + .read() + .expect("Expected device to call signal_used_queue!"); + + let result_hdr1: virtio_gpu_ctrl_hdr = mem.memory().read_obj(outputs[0][0]).unwrap(); + assert_eq!(result_hdr1, expected_hdr); + } + + #[test] + fn test_command_with_fence_not_ready() { + let (backend, mem) = init(); + backend.update_memory(mem.clone()).unwrap(); + let backend_inner = backend.inner.lock().unwrap(); + + const FENCE_ID: u64 = 123; + const CTX_ID: u32 = 1; + const RING_IDX: u8 = 2; + + let hdr = virtio_gpu_ctrl_hdr { + type_: VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D, + flags: VIRTIO_GPU_FLAG_FENCE | VIRTIO_GPU_FLAG_INFO_RING_IDX, + fence_id: FENCE_ID, + ctx_id: CTX_ID, + ring_idx: RING_IDX, + padding: Default::default(), + }; + + let cmd = virtio_gpu_transfer_host_3d::default(); + + let chain = TestingDescChainArgs { + readable_desc_bufs: &[hdr.as_slice(), cmd.as_slice()], + writable_desc_lengths: &[mem::size_of::() as u32], + }; + + let (control_vring, _, control_signal_used_queue_evt) = + create_control_vring(&mem, &[chain]); + let (cursor_vring, _, _) = create_cursor_vring(&mem, &[]); + + let mut mock_gpu = MockVirtioGpu::new(); + let seq = &mut mockall::Sequence::new(); + + mock_gpu + .expect_force_ctx_0() + .return_const(()) + .once() + .in_sequence(seq); + + mock_gpu + .expect_transfer_read() + .returning(|_, _, _, _| Ok(OkNoData)) + .once() + .in_sequence(seq); + + mock_gpu + .expect_create_fence() + .withf(|fence| fence.fence_id == FENCE_ID) + .returning(|_| Ok(OkNoData)) + .once() + .in_sequence(seq); + + mock_gpu + .expect_process_fence() + .with( + predicate::eq(VirtioGpuRing::ContextSpecific { + ctx_id: CTX_ID, + ring_idx: RING_IDX, + }), + predicate::eq(FENCE_ID), + predicate::eq(0), + predicate::eq(mem::size_of_val(&hdr) as u32), + ) + .return_const(false) + .once() + .in_sequence(seq); + + backend_inner + .handle_event(0, &mut mock_gpu, &[control_vring, cursor_vring]) + .unwrap(); + + assert_eq!( + 
control_signal_used_queue_evt.read().unwrap_err().kind(), + ErrorKind::WouldBlock + ); + } + + rusty_fork_test! { + #[test] + fn test_verify_backend() { + let test_dir = tempdir().expect("Could not create a temp test directory."); + let socket_path = test_dir.path().join(SOCKET_PATH); + let gpu_config = GpuConfig::new(socket_path, GpuMode::VirglRenderer); + let backend = VhostUserGpuBackend::new(&gpu_config).unwrap(); + + assert_eq!(backend.num_queues(), NUM_QUEUES); + assert_eq!(backend.max_queue_size(), QUEUE_SIZE); + assert_eq!(backend.features(), 0x1017100001B); + assert_eq!( + backend.protocol_features(), + VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::MQ + ); + assert_eq!(backend.queues_per_thread(), vec![0xffff_ffff]); + assert_eq!(backend.get_config(0, 0), vec![]); + + assert!(backend.inner.lock().unwrap().gpu_backend.is_none()); + backend.set_gpu_socket(gpu_backend_pair().1).unwrap(); + assert!(backend.inner.lock().unwrap().gpu_backend.is_some()); + + backend.set_event_idx(true); + assert!(backend.inner.lock().unwrap().event_idx_enabled); + + assert!(backend.exit_event(0).is_some()); + + let mem = GuestMemoryAtomic::new( + GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap(), + ); + backend.update_memory(mem.clone()).unwrap(); + + let vring = VringRwLock::new(mem, 0x1000).unwrap(); + vring.set_queue_info(0x100, 0x200, 0x300).unwrap(); + vring.set_queue_ready(true); + + assert_eq!( + backend + .handle_event(0, EventSet::OUT, &[vring.clone()], 0) + .unwrap_err() + .kind(), + io::ErrorKind::Other + ); + + assert_eq!( + backend + .handle_event(1, EventSet::IN, &[vring.clone()], 0) + .unwrap_err() + .kind(), + io::ErrorKind::Other + ); + + // Hit the loop part + backend.set_event_idx(true); + backend + .handle_event(0, EventSet::IN, &[vring.clone()], 0) + .unwrap(); + + // Hit the non-loop part + backend.set_event_idx(false); + backend.handle_event(0, EventSet::IN, &[vring], 0).unwrap(); + } + } + + mod test_image { + use 
super::*; + const GREEN_PIXEL: u32 = 0x00FF00FF; + const RED_PIXEL: u32 = 0xFF0000FF; + const BYTES_PER_PIXEL: usize = 4; + + pub fn write(mem: &GuestMemoryMmap, image_addr: GuestAddress, width: u32, height: u32) { + let mut image_addr: u64 = image_addr.0; + for i in 0..width * height { + let pixel = if i % 2 == 0 { RED_PIXEL } else { GREEN_PIXEL }; + let pixel = pixel.to_be_bytes(); + + mem.memory() + .write_slice(&pixel, GuestAddress(image_addr)) + .unwrap(); + image_addr += BYTES_PER_PIXEL as u64; + } + } + + pub fn assert(data: &[u8], width: u32, height: u32) { + assert_eq!(data.len(), (width * height) as usize * BYTES_PER_PIXEL); + for (i, pixel) in data.chunks(BYTES_PER_PIXEL).enumerate() { + let expected_pixel = if i % 2 == 0 { RED_PIXEL } else { GREEN_PIXEL }; + assert_eq!( + pixel, + expected_pixel.to_be_bytes(), + "Wrong pixel at index {i}" + ); + } + } + } + + fn split_into_mem_entries( + addr: GuestAddress, + len: u32, + chunk_size: u32, + ) -> Vec { + let mut entries = Vec::new(); + let mut addr = addr.0; + let mut remaining = len; + + while remaining >= chunk_size { + entries.push(virtio_gpu_mem_entry { + addr, + length: chunk_size, + padding: Default::default(), + }); + addr += u64::from(chunk_size); + remaining -= chunk_size; + } + + if remaining != 0 { + entries.push(virtio_gpu_mem_entry { + addr, + length: remaining, + padding: Default::default(), + }) + } + + entries + } + + fn new_hdr(type_: u32) -> virtio_gpu_ctrl_hdr { + virtio_gpu_ctrl_hdr { + type_, + ..Default::default() + } + } + + rusty_fork_test! { + /// This test uses multiple gpu commands, it crates a resource, writes a test image into it and + /// then present the display output. 
+ #[test] + fn test_display_output() { + let (backend, mem) = init(); + let (mut gpu_frontend, gpu_backend) = gpu_backend_pair(); + gpu_frontend + .set_read_timeout(Some(Duration::from_secs(10))) + .unwrap(); + gpu_frontend + .set_write_timeout(Some(Duration::from_secs(10))) + .unwrap(); + + backend.set_gpu_socket(gpu_backend).unwrap(); + + // Unfortunately there is no way to crate a VringEpollHandler directly (the ::new is not public) + // So we create a daemon to create the epoll handler for us here + let daemon = VhostUserDaemon::new( + "vhost-device-gpu-backend".to_string(), + backend.clone(), + mem.clone(), + ) + .expect("Could not create daemon"); + let epoll_handlers = daemon.get_epoll_handlers(); + backend.set_epoll_handler(&epoll_handlers); + mem::drop(daemon); + + const IMAGE_ADDR: GuestAddress = GuestAddress(0x30_000); + const IMAGE_WIDTH: u32 = 640; + const IMAGE_HEIGHT: u32 = 480; + const RESP_SIZE: u32 = mem::size_of::() as u32; + + let image_rect = virtio_gpu_rect { + x: 0, + y: 0, + width: IMAGE_WIDTH, + height: IMAGE_HEIGHT, + }; + + // Construct a command to create a resource + let hdr = new_hdr(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D); + let cmd = virtio_gpu_resource_create_2d { + resource_id: 1, + format: VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, // RGBA8888 + width: IMAGE_WIDTH, + height: IMAGE_HEIGHT, + }; + let create_resource_cmd = TestingDescChainArgs { + readable_desc_bufs: &[hdr.as_slice(), cmd.as_slice()], + writable_desc_lengths: &[RESP_SIZE], + }; + + // Construct a command to attach backing memory location(s) to the resource + let hdr = new_hdr(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING); + let mem_entries = split_into_mem_entries(IMAGE_ADDR, IMAGE_WIDTH * IMAGE_HEIGHT * 4, 4096); + let cmd = virtio_gpu_resource_attach_backing { + resource_id: 1, + nr_entries: mem_entries.len() as u32, + }; + let mut readable_desc_bufs = vec![hdr.as_slice(), cmd.as_slice()]; + readable_desc_bufs.extend(mem_entries.iter().map(|entry| entry.as_slice())); + let 
attach_backing_cmd = TestingDescChainArgs { + readable_desc_bufs: &readable_desc_bufs, + writable_desc_lengths: &[RESP_SIZE], + }; + + // Construct a command to transfer the resource data from the attached memory to gpu + let hdr = new_hdr(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D); + let cmd = virtio_gpu_transfer_to_host_2d { + r: image_rect, + offset: 0, + resource_id: 1, + padding: Default::default(), + }; + let transfer_to_host_cmd = TestingDescChainArgs { + readable_desc_bufs: &[hdr.as_slice(), cmd.as_slice()], + writable_desc_lengths: &[RESP_SIZE], + }; + + // Construct a command to set the scanout (display) output + let hdr = new_hdr(VIRTIO_GPU_CMD_SET_SCANOUT); + let cmd = virtio_gpu_set_scanout { + r: image_rect, + resource_id: 1, + scanout_id: 1, + }; + let set_scanout_cmd = TestingDescChainArgs { + readable_desc_bufs: &[hdr.as_slice(), cmd.as_slice()], + writable_desc_lengths: &[RESP_SIZE], + }; + + // Construct a command to flush the resource + let hdr = new_hdr(VIRTIO_GPU_CMD_RESOURCE_FLUSH); + let cmd = virtio_gpu_resource_flush { + r: image_rect, + resource_id: 1, + padding: Default::default(), + }; + let flush_resource_cmd = TestingDescChainArgs { + readable_desc_bufs: &[hdr.as_slice(), cmd.as_slice()], + writable_desc_lengths: &[RESP_SIZE], + }; + + // Create a control queue with all the commands defined above + let commands = [ + create_resource_cmd, + attach_backing_cmd, + transfer_to_host_cmd, + set_scanout_cmd, + flush_resource_cmd, + ]; + let (control_vring, _, _) = create_control_vring(&mem, &commands); + + // Create an empty cursor queue with no commands + let (cursor_vring, _, _) = create_cursor_vring(&mem, &[]); + + // Write the test image in guest memory + test_image::write(&mem.memory(), IMAGE_ADDR, IMAGE_WIDTH, IMAGE_HEIGHT); + + const EXPECTED_SCANOUT_REQUEST: VhostUserGpuScanout = VhostUserGpuScanout { + scanout_id: 1, + width: IMAGE_WIDTH, + height: IMAGE_HEIGHT, + }; + + const EXPECTED_UPDATE_REQUEST: VhostUserGpuUpdate = 
VhostUserGpuUpdate { + scanout_id: 1, + x: 0, + y: 0, + width: IMAGE_WIDTH, + height: IMAGE_HEIGHT, + }; + + // This simulates the frontend vmm. Here we check the issued frontend requests and if the + // output matches the test image. + let frontend_thread = thread::spawn(move || { + let mut scanout_request_hdr = [0; 12]; + let mut scanout_request = VhostUserGpuScanout::default(); + let mut update_request_hdr = [0; 12]; + let mut update_request = VhostUserGpuUpdate::default(); + let mut result_img = vec![0xdd; (IMAGE_WIDTH * IMAGE_HEIGHT * 4) as usize]; + + gpu_frontend.read_exact(&mut scanout_request_hdr).unwrap(); + gpu_frontend + .read_exact(scanout_request.as_mut_slice()) + .unwrap(); + gpu_frontend.read_exact(&mut update_request_hdr).unwrap(); + gpu_frontend + .read_exact(update_request.as_mut_slice()) + .unwrap(); + gpu_frontend.read_exact(&mut result_img).unwrap(); + + assert_eq!(scanout_request, EXPECTED_SCANOUT_REQUEST); + assert_eq!(update_request, EXPECTED_UPDATE_REQUEST); + test_image::assert(&result_img, IMAGE_WIDTH, IMAGE_HEIGHT); + }); + + backend + .handle_event(0, EventSet::IN, &[control_vring, cursor_vring], 0) + .unwrap(); + + frontend_thread.join().unwrap(); + } + } +} diff --git a/staging/vhost-device-gpu/src/lib.rs b/staging/vhost-device-gpu/src/lib.rs new file mode 100644 index 000000000..9e78d3141 --- /dev/null +++ b/staging/vhost-device-gpu/src/lib.rs @@ -0,0 +1,101 @@ +// Copyright 2024 Red Hat Inc +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +#![deny( + clippy::undocumented_unsafe_blocks, + /* groups */ + clippy::correctness, + clippy::suspicious, + clippy::complexity, + clippy::perf, + clippy::style, + clippy::nursery, + //* restriction */ + clippy::dbg_macro, + clippy::rc_buffer, + clippy::as_underscore, + clippy::assertions_on_result_states, + //* pedantic */ + clippy::cast_lossless, + clippy::cast_possible_wrap, + clippy::ptr_as_ptr, + clippy::bool_to_int_with_if, + clippy::borrow_as_ptr, + 
clippy::case_sensitive_file_extension_comparisons, + clippy::cast_lossless, + clippy::cast_ptr_alignment, + clippy::naive_bytecount +)] +#![allow( + clippy::missing_errors_doc, + clippy::missing_panics_doc, + clippy::significant_drop_in_scrutinee, + clippy::significant_drop_tightening +)] + +#[cfg(target_env = "gnu")] +pub mod device; +#[cfg(target_env = "gnu")] +pub mod protocol; +#[cfg(target_env = "gnu")] +pub mod virtio_gpu; + +use std::path::{Path, PathBuf}; + +use clap::ValueEnum; + +#[derive(Clone, Copy, Debug, PartialEq, Eq, ValueEnum)] +pub enum GpuMode { + VirglRenderer, + Gfxstream, +} + +#[derive(Debug, Clone)] +/// This structure holds the internal configuration for the GPU backend, +/// derived from the command-line arguments provided through `GpuArgs`. +pub struct GpuConfig { + /// vhost-user Unix domain socket + socket_path: PathBuf, + gpu_mode: GpuMode, +} + +impl GpuConfig { + /// Create a new instance of the `GpuConfig` struct, containing the + /// parameters to be fed into the gpu-backend server. + #[must_use] + pub const fn new(socket_path: PathBuf, gpu_mode: GpuMode) -> Self { + Self { + socket_path, + gpu_mode, + } + } + + /// Return the path of the unix domain socket which is listening to + /// requests from the guest. 
+ #[must_use] + pub fn socket_path(&self) -> &Path { + &self.socket_path + } + + #[must_use] + pub const fn gpu_mode(&self) -> GpuMode { + self.gpu_mode + } +} + +#[cfg(test)] +mod tests { + use tempfile::tempdir; + + use super::*; + + #[test] + fn test_gpu_config() { + // Test the creation of `GpuConfig` struct + let test_dir = tempdir().expect("Could not create a temp test directory."); + let socket_path = test_dir.path().join("socket"); + let gpu_config = GpuConfig::new(socket_path.clone(), GpuMode::VirglRenderer); + assert_eq!(gpu_config.socket_path(), socket_path); + } +} diff --git a/staging/vhost-device-gpu/src/main.rs b/staging/vhost-device-gpu/src/main.rs new file mode 100644 index 000000000..868d95f98 --- /dev/null +++ b/staging/vhost-device-gpu/src/main.rs @@ -0,0 +1,121 @@ +// VIRTIO GPU Emulation via vhost-user +// +// Copyright 2024 Red Hat Inc +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +use std::{path::PathBuf, process::exit}; + +use clap::Parser; +use log::{error, info}; +use thiserror::Error as ThisError; +use vhost_device_gpu::{ + device::{self, VhostUserGpuBackend}, + GpuConfig, GpuMode, +}; +use vhost_user_backend::VhostUserDaemon; +use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap}; + +type Result = std::result::Result; + +#[derive(Debug, ThisError)] +pub(crate) enum Error { + #[error("Could not create backend: {0}")] + CouldNotCreateBackend(device::Error), + #[error("Could not create daemon: {0}")] + CouldNotCreateDaemon(vhost_user_backend::Error), + #[error("Fatal error: {0}")] + ServeFailed(vhost_user_backend::Error), +} + +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +struct GpuArgs { + /// vhost-user Unix domain socket. 
+ #[clap(short, long, value_name = "SOCKET")] + socket_path: PathBuf, + #[clap(short, long, value_enum)] + gpu_mode: GpuMode, +} + +impl From for GpuConfig { + fn from(args: GpuArgs) -> Self { + let socket_path = args.socket_path; + let gpu_mode: GpuMode = args.gpu_mode; + + GpuConfig::new(socket_path, gpu_mode) + } +} + +fn start_backend(config: &GpuConfig) -> Result<()> { + info!("Starting backend"); + let socket = config.socket_path(); + let backend = VhostUserGpuBackend::new(config).map_err(Error::CouldNotCreateBackend)?; + + let mut daemon = VhostUserDaemon::new( + "vhost-device-gpu-backend".to_string(), + backend.clone(), + GuestMemoryAtomic::new(GuestMemoryMmap::new()), + ) + .map_err(Error::CouldNotCreateDaemon)?; + + backend.set_epoll_handler(&daemon.get_epoll_handlers()); + + daemon.serve(socket).map_err(Error::ServeFailed)?; + Ok(()) +} + +// Rust vmm container (https://github.com/rust-vmm/rust-vmm-container) doesn't +// have tools to do a musl build at the moment, and adding that support is +// tricky as well to the container. Skip musl builds until the time pre-built +// rutabaga library is available for musl. 
#[cfg(target_env = "musl")]
compile_error!("musl is not supported yet");

/// Entry point: initialize logging, parse the CLI and run the daemon.
fn main() {
    env_logger::init();

    let config = GpuConfig::from(GpuArgs::parse());
    match start_backend(&config) {
        Ok(()) => {}
        Err(e) => {
            error!("{e}");
            exit(1);
        }
    }
}

#[cfg(test)]
mod tests {
    use std::path::Path;

    use assert_matches::assert_matches;
    use tempfile::tempdir;

    use super::*;

    impl GpuArgs {
        pub(crate) fn from_args(path: &Path) -> Self {
            Self {
                socket_path: path.to_path_buf(),
                gpu_mode: GpuMode::Gfxstream,
            }
        }
    }

    #[test]
    fn test_parse_successful() {
        let tmp_dir = tempdir().expect("Could not create a temp test directory.");
        let sock_path = tmp_dir.path().join("vgpu.sock");

        let parsed = GpuArgs::from_args(sock_path.as_path());
        let config = GpuConfig::from(parsed);

        assert_eq!(config.socket_path(), sock_path);
    }

    #[test]
    fn test_fail_listener() {
        // This will fail the listeners and thread will panic.
        let bad_path = Path::new("/proc/-1/nonexistent");
        let config = GpuConfig::from(GpuArgs::from_args(bad_path));

        assert_matches!(start_backend(&config).unwrap_err(), Error::ServeFailed(_));
    }
}
diff --git a/staging/vhost-device-gpu/src/protocol.rs b/staging/vhost-device-gpu/src/protocol.rs
new file mode 100644
index 000000000..2408b0c66
--- /dev/null
+++ b/staging/vhost-device-gpu/src/protocol.rs
@@ -0,0 +1,1453 @@
// Copyright 2024 Red Hat Inc
// Copyright 2019 The ChromiumOS Authors
//
// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause

#![allow(non_camel_case_types)]

use std::{
    cmp::min,
    convert::From,
    ffi::CStr,
    fmt::{self, Display},
    io::{self, Read, Write},
    marker::PhantomData,
    mem::{size_of, size_of_val},
};

use log::trace;
use rutabaga_gfx::RutabagaError;
use thiserror::Error;
pub use virtio_bindings::virtio_gpu::{
    virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE as VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE,
    virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_CTX_CREATE as
VIRTIO_GPU_CMD_CTX_CREATE, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_CTX_DESTROY as VIRTIO_GPU_CMD_CTX_DESTROY, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE as VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_GET_CAPSET as VIRTIO_GPU_CMD_GET_CAPSET, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_GET_CAPSET_INFO as VIRTIO_GPU_CMD_GET_CAPSET_INFO, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_GET_DISPLAY_INFO as VIRTIO_GPU_CMD_GET_DISPLAY_INFO, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_GET_EDID as VIRTIO_GPU_CMD_GET_EDID, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_MOVE_CURSOR as VIRTIO_GPU_CMD_MOVE_CURSOR, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID as VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING as VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_CREATE_2D as VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_CREATE_3D as VIRTIO_GPU_CMD_RESOURCE_CREATE_3D, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB as VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING as VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_FLUSH as VIRTIO_GPU_CMD_RESOURCE_FLUSH, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB as VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB as VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_UNREF as VIRTIO_GPU_CMD_RESOURCE_UNREF, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_SET_SCANOUT as VIRTIO_GPU_CMD_SET_SCANOUT, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_SET_SCANOUT_BLOB as VIRTIO_GPU_CMD_SET_SCANOUT_BLOB, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_SUBMIT_3D as VIRTIO_GPU_CMD_SUBMIT_3D, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D as VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D as 
VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D as VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_UPDATE_CURSOR as VIRTIO_GPU_CMD_UPDATE_CURSOR, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID as VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER as VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID as VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID as VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY as VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_ERR_UNSPEC as VIRTIO_GPU_RESP_ERR_UNSPEC, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_CAPSET as VIRTIO_GPU_RESP_OK_CAPSET, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_CAPSET_INFO as VIRTIO_GPU_RESP_OK_CAPSET_INFO, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_DISPLAY_INFO as VIRTIO_GPU_RESP_OK_DISPLAY_INFO, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_EDID as VIRTIO_GPU_RESP_OK_EDID, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_MAP_INFO as VIRTIO_GPU_RESP_OK_MAP_INFO, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_NODATA as VIRTIO_GPU_RESP_OK_NODATA, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_RESOURCE_UUID as VIRTIO_GPU_RESP_OK_RESOURCE_UUID, +}; +use virtio_queue::{Reader, Writer}; +use vm_memory::{ByteValued, GuestAddress, Le32}; +use zerocopy::{AsBytes, FromBytes}; + +use crate::device::{self, Error}; + +pub const QUEUE_SIZE: usize = 1024; +pub const NUM_QUEUES: usize = 2; + +pub const CONTROL_QUEUE: u16 = 0; +pub const CURSOR_QUEUE: u16 = 1; +pub const POLL_EVENT: u16 = 3; + +pub const VIRTIO_GPU_MAX_SCANOUTS: u32 = 16; + +/// `CHROMIUM(b/277982577)` success responses +pub const VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO: u32 = 0x11FF; + +/// Create a OS-specific handle from guest memory (not upstreamed). 
+pub const VIRTIO_GPU_BLOB_FLAG_CREATE_GUEST_HANDLE: u32 = 0x0008; + +pub const VIRTIO_GPU_FLAG_FENCE: u32 = 1 << 0; +pub const VIRTIO_GPU_FLAG_INFO_RING_IDX: u32 = 1 << 1; + +/// Virtio Gpu Configuration +#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)] +#[repr(C)] +pub struct VirtioGpuConfig { + /// Signals pending events to the driver + pub events_read: Le32, + /// Clears pending events in the device + pub events_clear: Le32, + /// Maximum number of scanouts supported by the device + pub num_scanouts: Le32, + /// Maximum number of capability sets supported by the device + pub num_capsets: Le32, +} + +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for VirtioGpuConfig {} + +#[derive(Debug, PartialEq, Eq)] +pub struct InvalidCommandType(u32); + +impl std::fmt::Display for InvalidCommandType { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(fmt, "Invalid command type {}", self.0) + } +} + +impl From for crate::device::Error { + fn from(val: InvalidCommandType) -> Self { + Self::InvalidCommandType(val.0) + } +} + +impl std::error::Error for InvalidCommandType {} + +#[derive(Copy, Clone, Debug, Default, AsBytes, FromBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_ctrl_hdr { + pub type_: u32, + pub flags: u32, + pub fence_id: u64, + pub ctx_id: u32, + pub ring_idx: u8, + pub padding: [u8; 3], +} + +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_ctrl_hdr {} + +/// Data passed in the cursor `vq` + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_cursor_pos { + pub scanout_id: u32, + pub x: u32, + pub y: u32, + pub padding: u32, +} + +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. 
unsafe impl ByteValued for virtio_gpu_cursor_pos {}

// VIRTIO_GPU_CMD_UPDATE_CURSOR, VIRTIO_GPU_CMD_MOVE_CURSOR
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_update_cursor {
    pub pos: virtio_gpu_cursor_pos, // update & move
    pub resource_id: u32,           // update only
    pub hot_x: u32,                 // update only
    pub hot_y: u32,                 // update only
    pub padding: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_update_cursor {}

/// Data passed in the control `vq`, 2d related

// Rectangle with origin (x, y) and extent (width, height).
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_rect {
    pub x: u32,
    pub y: u32,
    pub width: u32,
    pub height: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_rect {}

// VIRTIO_GPU_CMD_GET_EDID
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_get_edid {
    pub scanout: u32,
    pub padding: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_get_edid {}

// VIRTIO_GPU_CMD_RESOURCE_UNREF
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_resource_unref {
    pub resource_id: u32,
    pub padding: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_resource_unref {}

// VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: create a 2d resource with a format
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_resource_create_2d {
    pub resource_id: u32,
    pub format: u32,
    pub width: u32,
    pub height: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_resource_create_2d {}

// VIRTIO_GPU_CMD_SET_SCANOUT
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_set_scanout {
    pub r: virtio_gpu_rect,
    pub scanout_id: u32,
    pub resource_id: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_set_scanout {}

// VIRTIO_GPU_CMD_RESOURCE_FLUSH
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_resource_flush {
    pub r: virtio_gpu_rect,
    pub resource_id: u32,
    pub padding: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_resource_flush {}

// VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: simple transfer to_host
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_transfer_to_host_2d {
    pub r: virtio_gpu_rect,
    pub offset: u64,
    pub resource_id: u32,
    pub padding: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_transfer_to_host_2d {}

// One guest memory region backing a resource (guest physical address plus
// length in bytes); used by VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING.
#[derive(Copy, Clone, Debug, Default, AsBytes, FromBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_mem_entry {
    pub addr: u64,
    pub length: u32,
    pub padding: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_mem_entry {}

// VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING
// `nr_entries` instances of `virtio_gpu_mem_entry` follow this struct in the
// descriptor chain (see `GpuCommand::decode`).
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_resource_attach_backing {
    pub resource_id: u32,
    pub nr_entries: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_resource_attach_backing {}

// VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_resource_detach_backing {
    pub resource_id: u32,
    pub padding: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_resource_detach_backing {}

// Per-scanout entry of the display-info response.
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_display_one {
    pub r: virtio_gpu_rect,
    pub enabled: u32,
    pub flags: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_display_one {}

// Response carrying one `virtio_gpu_display_one` per possible scanout.
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes)]
#[repr(C)]
pub struct virtio_gpu_resp_display_info {
    pub hdr: virtio_gpu_ctrl_hdr,
    pub pmodes: [virtio_gpu_display_one; VIRTIO_GPU_MAX_SCANOUTS as usize],
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_resp_display_info {}

const EDID_BLOB_MAX_SIZE: usize = 1024;

// Response to VIRTIO_GPU_CMD_GET_EDID; only the first `size` bytes of `edid`
// are valid. No `Default`/`FromBytes` derives here because of the 1 KiB array.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct virtio_gpu_resp_edid {
    pub hdr: virtio_gpu_ctrl_hdr,
    pub size: u32,
    pub padding: u32,
    pub edid: [u8; EDID_BLOB_MAX_SIZE],
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_resp_edid {}

// data passed in the control vq, 3d related

// 3D box: origin (x, y, z) and extent (w, h, d).
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_box {
    pub x: u32,
    pub y: u32,
    pub z: u32,
    pub w: u32,
    pub h: u32,
    pub d: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_box {}

// VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_transfer_host_3d {
    pub box_: virtio_gpu_box,
    pub offset: u64,
    pub resource_id: u32,
    pub level: u32,
    pub stride: u32,
    pub layer_stride: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_transfer_host_3d {}

// VIRTIO_GPU_CMD_RESOURCE_CREATE_3D
pub const VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP: u32 = 1 << 0;
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_resource_create_3d {
    pub resource_id: u32,
    pub target: u32,
    pub format: u32,
    pub bind: u32,
    pub width: u32,
    pub height: u32,
    pub depth: u32,
    pub array_size: u32,
    pub last_level: u32,
    pub nr_samples: u32,
    pub flags: u32,
    pub padding: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
+unsafe impl ByteValued for virtio_gpu_resource_create_3d {} + +// VIRTIO_GPU_CMD_CTX_CREATE +pub const VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK: u32 = 1 << 0; +#[derive(Copy, Clone, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_ctx_create { + pub nlen: u32, + pub context_init: u32, + pub debug_name: [u8; 64], +} + +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_ctx_create {} + +impl Default for virtio_gpu_ctx_create { + fn default() -> Self { + Self { + nlen: 0, + context_init: 0, + debug_name: [0; 64], + } + } +} + +impl virtio_gpu_ctx_create { + #[must_use] + pub fn get_debug_name(&self) -> String { + CStr::from_bytes_with_nul(&self.debug_name[..min(64, self.nlen as usize)]).map_or_else( + |err| format!("Err({err})"), + |c_str| c_str.to_string_lossy().into_owned(), + ) + } +} +impl fmt::Debug for virtio_gpu_ctx_create { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("virtio_gpu_ctx_create") + .field("debug_name", &self.get_debug_name()) + .field("context_init", &self.context_init) + .finish_non_exhaustive() + } +} + +// VIRTIO_GPU_CMD_CTX_DESTROY +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_ctx_destroy {} + +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_ctx_destroy {} + +// VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_ctx_resource { + pub resource_id: u32, + pub padding: u32, +} + +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. 
unsafe impl ByteValued for virtio_gpu_ctx_resource {}

// VIRTIO_GPU_CMD_SUBMIT_3D
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_cmd_submit {
    pub size: u32,

    // The in-fence IDs are prepended to the cmd_buf and memory layout
    // of the VIRTIO_GPU_CMD_SUBMIT_3D buffer looks like this:
    //   _________________
    //   | CMD_SUBMIT_3D |
    //   -----------------
    //   |  header       |
    //   |  in-fence IDs |
    //   |  cmd_buf      |
    //   -----------------
    //
    // This makes in-fence IDs naturally aligned to the sizeof(u64) inside
    // of the virtio buffer.
    pub num_in_fences: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_cmd_submit {}

pub const VIRTIO_GPU_CAPSET_VIRGL: u32 = 1;
pub const VIRTIO_GPU_CAPSET_VIRGL2: u32 = 2;
pub const VIRTIO_GPU_CAPSET_GFXSTREAM: u32 = 3;
pub const VIRTIO_GPU_CAPSET_VENUS: u32 = 4;
pub const VIRTIO_GPU_CAPSET_CROSS_DOMAIN: u32 = 5;

// VIRTIO_GPU_CMD_GET_CAPSET_INFO
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_get_capset_info {
    pub capset_index: u32,
    pub padding: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_get_capset_info {}

// VIRTIO_GPU_RESP_OK_CAPSET_INFO
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes)]
#[repr(C)]
pub struct virtio_gpu_resp_capset_info {
    pub hdr: virtio_gpu_ctrl_hdr,
    pub capset_id: u32,
    pub capset_max_version: u32,
    pub capset_max_size: u32,
    pub padding: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_resp_capset_info {}

// VIRTIO_GPU_CMD_GET_CAPSET
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_get_capset {
    pub capset_id: u32,
    pub capset_version: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_get_capset {}

// VIRTIO_GPU_RESP_OK_CAPSET
// Header-only marker struct: the variable-length capset payload follows the
// header on the wire, hence the zero-sized `PhantomData` field.
#[derive(Copy, Clone, Debug, Default)]
#[repr(C)]
pub struct virtio_gpu_resp_capset {
    pub hdr: virtio_gpu_ctrl_hdr,
    pub capset_data: PhantomData<[u8]>,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_resp_capset {}

// VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes)]
#[repr(C)]
pub struct virtio_gpu_resp_resource_plane_info {
    pub hdr: virtio_gpu_ctrl_hdr,
    pub count: u32,
    pub padding: u32,
    pub format_modifier: u64,
    pub strides: [u32; 4],
    pub offsets: [u32; 4],
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_resp_resource_plane_info {}

pub const PLANE_INFO_MAX_COUNT: usize = 4;

pub const VIRTIO_GPU_EVENT_DISPLAY: u32 = 1 << 0;

// VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_resource_create_blob {
    pub resource_id: u32,
    pub blob_mem: u32,
    pub blob_flags: u32,
    pub nr_entries: u32,
    pub blob_id: u64,
    pub size: u64,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_resource_create_blob {}

// VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_resource_map_blob {
    pub resource_id: u32,
    pub padding: u32,
    pub offset: u64,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_resource_map_blob {}

// VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_resource_unmap_blob {
    pub resource_id: u32,
    pub padding: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_resource_unmap_blob {}

// VIRTIO_GPU_RESP_OK_MAP_INFO
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_resp_map_info {
    pub hdr: virtio_gpu_ctrl_hdr,
    pub map_info: u32,
    pub padding: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_resp_map_info {}

// VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_resource_assign_uuid {
    pub resource_id: u32,
    pub padding: u32,
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_resource_assign_uuid {}

// VIRTIO_GPU_RESP_OK_RESOURCE_UUID
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes)]
#[repr(C)]
pub struct virtio_gpu_resp_resource_uuid {
    pub hdr: virtio_gpu_ctrl_hdr,
    pub uuid: [u8; 16],
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_resp_resource_uuid {}

// VIRTIO_GPU_CMD_SET_SCANOUT_BLOB
#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)]
#[repr(C)]
pub struct virtio_gpu_set_scanout_blob {
    pub r: virtio_gpu_rect,
    pub scanout_id: u32,
    pub resource_id: u32,
    pub width: u32,
    pub height: u32,
    pub format: u32,
    pub padding: u32,
    pub strides: [u32; 4],
    pub offsets: [u32; 4],
}

// SAFETY: The layout of the structure is fixed and can be initialized by
// reading its content from byte array.
unsafe impl ByteValued for virtio_gpu_set_scanout_blob {}

// simple formats for fbcon/X use
pub const VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM: u32 = 1;
pub const VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM: u32 = 2;
pub const VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM: u32 = 3;
pub const VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM: u32 = 4;
pub const VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM: u32 = 67;
pub const VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM: u32 = 68;
pub const VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM: u32 = 121;
pub const VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM: u32 = 134;

/// A virtio gpu command and associated metadata specific to each command.
+#[derive(Clone, PartialEq, Eq)] +pub enum GpuCommand { + GetDisplayInfo, + GetEdid(virtio_gpu_get_edid), + ResourceCreate2d(virtio_gpu_resource_create_2d), + ResourceUnref(virtio_gpu_resource_unref), + SetScanout(virtio_gpu_set_scanout), + SetScanoutBlob(virtio_gpu_set_scanout_blob), + ResourceFlush(virtio_gpu_resource_flush), + TransferToHost2d(virtio_gpu_transfer_to_host_2d), + ResourceAttachBacking( + virtio_gpu_resource_attach_backing, + Vec<(GuestAddress, usize)>, + ), + ResourceDetachBacking(virtio_gpu_resource_detach_backing), + GetCapsetInfo(virtio_gpu_get_capset_info), + GetCapset(virtio_gpu_get_capset), + CtxCreate(virtio_gpu_ctx_create), + CtxDestroy(virtio_gpu_ctx_destroy), + CtxAttachResource(virtio_gpu_ctx_resource), + CtxDetachResource(virtio_gpu_ctx_resource), + ResourceCreate3d(virtio_gpu_resource_create_3d), + TransferToHost3d(virtio_gpu_transfer_host_3d), + TransferFromHost3d(virtio_gpu_transfer_host_3d), + CmdSubmit3d { + cmd_data: Vec, + fence_ids: Vec, + }, + ResourceCreateBlob(virtio_gpu_resource_create_blob), + ResourceMapBlob(virtio_gpu_resource_map_blob), + ResourceUnmapBlob(virtio_gpu_resource_unmap_blob), + UpdateCursor(virtio_gpu_update_cursor), + MoveCursor(virtio_gpu_update_cursor), + ResourceAssignUuid(virtio_gpu_resource_assign_uuid), +} + +/// An error indicating something went wrong decoding a `GpuCommand`. These +/// correspond to `VIRTIO_GPU_CMD_*`. +#[derive(Error, Debug)] +pub enum GpuCommandDecodeError { + /// The type of the command was invalid. + #[error("invalid command type ({0})")] + InvalidType(u32), + /// An I/O error occurred. 
+ #[error("an I/O error occurred: {0}")] + IO(io::Error), + #[error("Descriptor read failed")] + DescriptorReadFailed, +} + +impl From for GpuCommandDecodeError { + fn from(e: io::Error) -> Self { + Self::IO(e) + } +} + +impl From for GpuCommandDecodeError { + fn from(_: device::Error) -> Self { + Self::DescriptorReadFailed + } +} + +impl From for GpuResponseEncodeError { + fn from(_: device::Error) -> Self { + Self::DescriptorWriteFailed + } +} + +impl fmt::Debug for GpuCommand { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct(self.command_name()).finish() + } +} + +impl GpuCommand { + #[must_use] + pub const fn command_name(&self) -> &'static str { + use GpuCommand::*; + match self { + GetDisplayInfo => "GetDisplayInfo", + GetEdid(_info) => "GetEdid", + ResourceCreate2d(_info) => "ResourceCreate2d", + ResourceUnref(_info) => "ResourceUnref", + SetScanout(_info) => "SetScanout", + SetScanoutBlob(_info) => "SetScanoutBlob", + ResourceFlush(_info) => "ResourceFlush", + TransferToHost2d(_info) => "TransferToHost2d", + ResourceAttachBacking(_info, _vecs) => "ResourceAttachBacking", + ResourceDetachBacking(_info) => "ResourceDetachBacking", + GetCapsetInfo(_info) => "GetCapsetInfo", + GetCapset(_info) => "GetCapset", + CtxCreate(_info) => "CtxCreate", + CtxDestroy(_info) => "CtxDestroy", + CtxAttachResource(_info) => "CtxAttachResource", + CtxDetachResource(_info) => "CtxDetachResource", + ResourceCreate3d(_info) => "ResourceCreate3d", + TransferToHost3d(_info) => "TransferToHost3d", + TransferFromHost3d(_info) => "TransferFromHost3d", + CmdSubmit3d { .. } => "CmdSubmit3d", + ResourceCreateBlob(_info) => "ResourceCreateBlob", + ResourceMapBlob(_info) => "ResourceMapBlob", + ResourceUnmapBlob(_info) => "ResourceUnmapBlob", + UpdateCursor(_info) => "UpdateCursor", + MoveCursor(_info) => "MoveCursor", + ResourceAssignUuid(_info) => "ResourceAssignUuid", + } + } + + /// Decodes a command from the given chunk of memory. 
+ pub fn decode( + reader: &mut Reader, + ) -> Result<(virtio_gpu_ctrl_hdr, Self), GpuCommandDecodeError> { + use self::GpuCommand::*; + let hdr = reader + .read_obj::() + .map_err(|_| Error::DescriptorReadFailed)?; + trace!("Decoding GpuCommand 0x{:0x}", hdr.type_); + let cmd = match hdr.type_ { + VIRTIO_GPU_CMD_GET_DISPLAY_INFO => GetDisplayInfo, + VIRTIO_GPU_CMD_GET_EDID => { + GetEdid(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_RESOURCE_CREATE_2D => { + ResourceCreate2d(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_RESOURCE_UNREF => { + ResourceUnref(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_SET_SCANOUT => { + SetScanout(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_SET_SCANOUT_BLOB => { + SetScanoutBlob(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_RESOURCE_FLUSH => { + ResourceFlush(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D => { + TransferToHost2d(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING => { + let info: virtio_gpu_resource_attach_backing = + reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?; + let mut entries = Vec::with_capacity(info.nr_entries as usize); + for _ in 0..info.nr_entries { + let entry: virtio_gpu_mem_entry = + reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?; + entries.push((GuestAddress(entry.addr), entry.length as usize)); + } + ResourceAttachBacking(info, entries) + } + VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING => { + ResourceDetachBacking(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_GET_CAPSET_INFO => { + GetCapsetInfo(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) 
+            }
+            VIRTIO_GPU_CMD_GET_CAPSET => {
+                GetCapset(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            VIRTIO_GPU_CMD_CTX_CREATE => {
+                CtxCreate(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            VIRTIO_GPU_CMD_CTX_DESTROY => {
+                CtxDestroy(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE => {
+                CtxAttachResource(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE => {
+                CtxDetachResource(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            VIRTIO_GPU_CMD_RESOURCE_CREATE_3D => {
+                ResourceCreate3d(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D => {
+                TransferToHost3d(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D => {
+                TransferFromHost3d(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            VIRTIO_GPU_CMD_SUBMIT_3D => {
+                let info: virtio_gpu_cmd_submit =
+                    reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?;
+
+                let mut cmd_data = vec![0; info.size as usize];
+                let mut fence_ids: Vec<u64> = Vec::with_capacity(info.num_in_fences as usize);
+
+                for _ in 0..info.num_in_fences {
+                    let fence_id = reader
+                        .read_obj::<u64>()
+                        .map_err(|_| Error::DescriptorReadFailed)?;
+                    fence_ids.push(fence_id);
+                }
+
+                reader
+                    .read_exact(&mut cmd_data[..])
+                    .map_err(|_| Error::DescriptorReadFailed)?;
+
+                CmdSubmit3d {
+                    cmd_data,
+                    fence_ids,
+                }
+            }
+            VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB => {
+                ResourceCreateBlob(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB => {
+                ResourceMapBlob(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB => {
+                ResourceUnmapBlob(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            VIRTIO_GPU_CMD_UPDATE_CURSOR => {
+                UpdateCursor(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            VIRTIO_GPU_CMD_MOVE_CURSOR => {
+                MoveCursor(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID => {
+                ResourceAssignUuid(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            _ => return Err(GpuCommandDecodeError::InvalidType(hdr.type_)),
+        };
+
+        Ok((hdr, cmd))
+    }
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub struct GpuResponsePlaneInfo {
+    pub stride: u32,
+    pub offset: u32,
+}
+
+/// A response to a `GpuCommand`. These correspond to `VIRTIO_GPU_RESP_*`.
+#[derive(Debug)]
+pub enum GpuResponse {
+    OkNoData,
+    OkDisplayInfo(Vec<(u32, u32, bool)>),
+    OkEdid {
+        /// The EDID display data blob (as specified by VESA)
+        blob: Box<[u8]>,
+    },
+    OkCapsetInfo {
+        capset_id: u32,
+        version: u32,
+        size: u32,
+    },
+    OkCapset(Vec<u8>),
+    OkResourcePlaneInfo {
+        format_modifier: u64,
+        plane_info: Vec<GpuResponsePlaneInfo>,
+    },
+    OkResourceUuid {
+        uuid: [u8; 16],
+    },
+    OkMapInfo {
+        map_info: u32,
+    },
+    ErrUnspec,
+    ErrRutabaga(RutabagaError),
+    ErrScanout {
+        num_scanouts: u32,
+    },
+    ErrOutOfMemory,
+    ErrInvalidScanoutId,
+    ErrInvalidResourceId,
+    ErrInvalidContextId,
+    ErrInvalidParameter,
+}
+
+impl From<RutabagaError> for GpuResponse {
+    fn from(e: RutabagaError) -> Self {
+        Self::ErrRutabaga(e)
+    }
+}
+
+impl Display for GpuResponse {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        use self::GpuResponse::{ErrRutabaga, ErrScanout};
+        match self {
+            ErrRutabaga(e) => write!(f, "renderer error: {e}"),
+            ErrScanout { num_scanouts } => write!(f, "non-zero scanout: {num_scanouts}"),
+            _ => Ok(()),
+        }
+    }
+}
+
+/// An error indicating something went wrong encoding a `GpuResponse`.
+#[derive(Error, Debug)]
+pub enum GpuResponseEncodeError {
+    /// An I/O error occurred.
+ #[error("an I/O error occurred: {0}")] + IO(io::Error), + /// Size conversion failed + #[error("Size conversion failed")] + SizeOverflow, + /// More displays than are valid were in a `OkDisplayInfo`. + #[error("{0} is more displays than are valid")] + TooManyDisplays(usize), + /// More planes than are valid were in a `OkResourcePlaneInfo`. + #[error("{0} is more planes than are valid")] + TooManyPlanes(usize), + #[error("Descriptor write failed")] + DescriptorWriteFailed, +} + +impl From for GpuResponseEncodeError { + fn from(e: io::Error) -> Self { + Self::IO(e) + } +} + +pub type VirtioGpuResult = std::result::Result; + +impl GpuResponse { + /// Encodes a this `GpuResponse` into `resp` and the given set of metadata. + pub fn encode( + &self, + flags: u32, + fence_id: u64, + ctx_id: u32, + ring_idx: u8, + writer: &mut Writer, + ) -> Result { + let hdr = virtio_gpu_ctrl_hdr { + type_: self.get_type(), + flags, + fence_id, + ctx_id, + ring_idx, + padding: Default::default(), + }; + let len = match *self { + Self::OkDisplayInfo(ref info) => { + if info.len() > VIRTIO_GPU_MAX_SCANOUTS as usize { + return Err(GpuResponseEncodeError::TooManyDisplays(info.len())); + } + let mut disp_info = virtio_gpu_resp_display_info { + hdr, + pmodes: Default::default(), + }; + for (disp_mode, &(width, height, enabled)) in disp_info.pmodes.iter_mut().zip(info) + { + disp_mode.r.width = width; + disp_mode.r.height = height; + disp_mode.enabled = u32::from(enabled); + } + writer + .write_obj(disp_info) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of_val(&disp_info) + } + Self::OkEdid { ref blob } => { + let Ok(size) = u32::try_from(blob.len()) else { + return Err(GpuResponseEncodeError::SizeOverflow); + }; + let mut edid_info = virtio_gpu_resp_edid { + hdr, + size, + edid: [0; EDID_BLOB_MAX_SIZE], + padding: Default::default(), + }; + edid_info.edid.copy_from_slice(blob); + writer + .write_obj(edid_info) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of_val(&edid_info) 
+            }
+            Self::OkCapsetInfo {
+                capset_id,
+                version,
+                size,
+            } => {
+                writer
+                    .write_obj(virtio_gpu_resp_capset_info {
+                        hdr,
+                        capset_id,
+                        capset_max_version: version,
+                        capset_max_size: size,
+                        padding: 0u32,
+                    })
+                    .map_err(|_| Error::DescriptorWriteFailed)?;
+                size_of::<virtio_gpu_resp_capset_info>()
+            }
+            Self::OkCapset(ref data) => {
+                writer
+                    .write_obj(hdr)
+                    .map_err(|_| Error::DescriptorWriteFailed)?;
+                writer
+                    .write(data)
+                    .map_err(|_| Error::DescriptorWriteFailed)?;
+                size_of_val(&hdr) + data.len()
+            }
+            Self::OkResourcePlaneInfo {
+                format_modifier,
+                ref plane_info,
+            } => {
+                if plane_info.len() > PLANE_INFO_MAX_COUNT {
+                    return Err(GpuResponseEncodeError::TooManyPlanes(plane_info.len()));
+                }
+                let mut strides = [u32::default(); PLANE_INFO_MAX_COUNT];
+                let mut offsets = [u32::default(); PLANE_INFO_MAX_COUNT];
+                for (plane_index, plane) in plane_info.iter().enumerate() {
+                    strides[plane_index] = plane.stride;
+                    offsets[plane_index] = plane.offset;
+                }
+                let Ok(count) = u32::try_from(plane_info.len()) else {
+                    return Err(GpuResponseEncodeError::SizeOverflow);
+                };
+                let plane_info = virtio_gpu_resp_resource_plane_info {
+                    hdr,
+                    count,
+                    padding: 0u32,
+                    format_modifier,
+                    strides,
+                    offsets,
+                };
+                if writer.available_bytes() >= size_of_val(&plane_info) {
+                    writer
+                        .write_obj(plane_info)
+                        .map_err(|_| Error::DescriptorWriteFailed)?;
+                    size_of_val(&plane_info)
+                } else {
+                    // In case there is too little room in the response slice to store the
+                    // entire virtio_gpu_resp_resource_plane_info, convert response to a regular
+                    // VIRTIO_GPU_RESP_OK_NODATA and attempt to return that.
+ writer + .write_obj(virtio_gpu_ctrl_hdr { + type_: VIRTIO_GPU_RESP_OK_NODATA, + ..hdr + }) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of_val(&hdr) + } + } + Self::OkResourceUuid { uuid } => { + let resp_info = virtio_gpu_resp_resource_uuid { hdr, uuid }; + + writer + .write_obj(resp_info) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of_val(&resp_info) + } + Self::OkMapInfo { map_info } => { + let resp_info = virtio_gpu_resp_map_info { + hdr, + map_info, + padding: Default::default(), + }; + + writer + .write_obj(resp_info) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of_val(&resp_info) + } + _ => { + writer + .write_obj(hdr) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of_val(&hdr) + } + }; + let len = u32::try_from(len).map_err(|_| GpuResponseEncodeError::SizeOverflow)?; + + Ok(len) + } + + /// Gets the `VIRTIO_GPU_*` enum value that corresponds to this variant. + #[must_use] + pub const fn get_type(&self) -> u32 { + match self { + Self::OkNoData => VIRTIO_GPU_RESP_OK_NODATA, + Self::OkDisplayInfo(_) => VIRTIO_GPU_RESP_OK_DISPLAY_INFO, + Self::OkEdid { .. } => VIRTIO_GPU_RESP_OK_EDID, + Self::OkCapsetInfo { .. } => VIRTIO_GPU_RESP_OK_CAPSET_INFO, + Self::OkCapset(_) => VIRTIO_GPU_RESP_OK_CAPSET, + Self::OkResourcePlaneInfo { .. } => VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO, + Self::OkResourceUuid { .. } => VIRTIO_GPU_RESP_OK_RESOURCE_UUID, + Self::OkMapInfo { .. } => VIRTIO_GPU_RESP_OK_MAP_INFO, + Self::ErrUnspec | Self::ErrRutabaga(_) | Self::ErrScanout { .. 
} => { + VIRTIO_GPU_RESP_ERR_UNSPEC + } + Self::ErrOutOfMemory => VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY, + Self::ErrInvalidScanoutId => VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID, + Self::ErrInvalidResourceId => VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID, + Self::ErrInvalidContextId => VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID, + Self::ErrInvalidParameter => VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER, + } + } +} + +#[cfg(test)] +mod tests { + use virtio_bindings::virtio_ring::VRING_DESC_F_WRITE; + use virtio_queue::{mock::MockSplitQueue, Descriptor}; + use vm_memory::GuestMemoryMmap; + + use super::*; + + #[test] + fn test_virtio_gpu_config() { + // Test VirtioGpuConfig size + assert_eq!(std::mem::size_of::(), 16); + } + + #[test] + fn test_invalid_command_type_display() { + let error = InvalidCommandType(42); + assert_eq!(format!("{}", error), "Invalid command type 42"); + } + + #[test] + fn test_gpu_response_display() { + let err_rutabaga = GpuResponse::ErrRutabaga(RutabagaError::InvalidContextId); + assert_eq!( + format!("{}", err_rutabaga), + "renderer error: invalid context id" + ); + + let err_scanout = GpuResponse::ErrScanout { num_scanouts: 3 }; + assert_eq!(format!("{}", err_scanout), "non-zero scanout: 3"); + } + + #[test] + fn test_invalid_type_error() { + let error = GpuCommandDecodeError::InvalidType(42); + assert_eq!(format!("{}", error), "invalid command type (42)"); + } + + // Test io_error conversion to gpu command decode error + #[test] + fn test_io_error() { + let io_error = io::Error::new(io::ErrorKind::Other, "Test IO error"); + let gpu_error: GpuCommandDecodeError = io_error.into(); + match gpu_error { + GpuCommandDecodeError::IO(_) => (), + _ => panic!("Expected IO error"), + } + } + + //Test vhu_error conversion to gpu command decode/encode error + #[test] + fn test_device_error() { + let device_error = device::Error::DescriptorReadFailed; + let gpu_error: GpuCommandDecodeError = device_error.into(); + match gpu_error { + 
GpuCommandDecodeError::DescriptorReadFailed => (), + _ => panic!("Expected DescriptorReadFailed error"), + } + let device_error = device::Error::DescriptorWriteFailed; + let gpu_error: GpuResponseEncodeError = device_error.into(); + match gpu_error { + GpuResponseEncodeError::DescriptorWriteFailed => (), + _ => panic!("Expected DescriptorWriteFailed error"), + } + } + + #[test] + fn test_get_display_info_debug() { + let get_display_info = GpuCommand::GetDisplayInfo; + assert_eq!(format!("{:?}", get_display_info), "GetDisplayInfo"); + } + + #[test] + fn test_get_edid_debug() { + let get_edid = GpuCommand::GetEdid(virtio_gpu_get_edid::default()); + assert_eq!(format!("{:?}", get_edid), "GetEdid"); + } + + #[test] + fn test_resource_create_2d_debug() { + let resource_create_2d = + GpuCommand::ResourceCreate2d(virtio_gpu_resource_create_2d::default()); + assert_eq!(format!("{:?}", resource_create_2d), "ResourceCreate2d"); + } + + #[test] + fn test_resource_unref_debug() { + let resource_unref = GpuCommand::ResourceUnref(virtio_gpu_resource_unref::default()); + assert_eq!(format!("{:?}", resource_unref), "ResourceUnref"); + } + + #[test] + fn test_set_scanout_debug() { + let set_scanout = GpuCommand::SetScanout(virtio_gpu_set_scanout::default()); + assert_eq!(format!("{:?}", set_scanout), "SetScanout"); + } + + #[test] + fn test_set_scanout_blob_debug() { + let set_scanout_blob = GpuCommand::SetScanoutBlob(virtio_gpu_set_scanout_blob::default()); + assert_eq!(format!("{:?}", set_scanout_blob), "SetScanoutBlob"); + } + + #[test] + fn test_resource_flush_debug() { + let resource_flush = GpuCommand::ResourceFlush(virtio_gpu_resource_flush::default()); + assert_eq!(format!("{:?}", resource_flush), "ResourceFlush"); + } + + #[test] + fn test_transfer_to_host_2d_debug() { + let transfer_to_host_2d = + GpuCommand::TransferToHost2d(virtio_gpu_transfer_to_host_2d::default()); + assert_eq!(format!("{:?}", transfer_to_host_2d), "TransferToHost2d"); + } + + #[test] + fn 
test_resource_detach_backing_debug() { + let resource_detach_backing = + GpuCommand::ResourceDetachBacking(virtio_gpu_resource_detach_backing::default()); + assert_eq!( + format!("{:?}", resource_detach_backing), + "ResourceDetachBacking" + ); + } + + #[test] + fn test_get_capset_info_debug() { + let get_capset_info = GpuCommand::GetCapsetInfo(virtio_gpu_get_capset_info::default()); + assert_eq!(format!("{:?}", get_capset_info), "GetCapsetInfo"); + } + + #[test] + fn test_get_capset_debug() { + let get_capset = GpuCommand::GetCapset(virtio_gpu_get_capset::default()); + assert_eq!(format!("{:?}", get_capset), "GetCapset"); + } + + #[test] + fn test_ctx_create_debug() { + let ctx_create = GpuCommand::CtxCreate(virtio_gpu_ctx_create::default()); + assert_eq!(format!("{:?}", ctx_create), "CtxCreate"); + } + + #[test] + fn test_ctx_destroy_debug() { + let ctx_destroy = GpuCommand::CtxDestroy(virtio_gpu_ctx_destroy::default()); + assert_eq!(format!("{:?}", ctx_destroy), "CtxDestroy"); + } + + #[test] + fn test_ctx_attach_resource_debug() { + let ctx_attach_resource = GpuCommand::CtxAttachResource(virtio_gpu_ctx_resource::default()); + assert_eq!(format!("{:?}", ctx_attach_resource), "CtxAttachResource"); + } + + #[test] + fn test_ctx_detach_resource_debug() { + let ctx_detach_resource = GpuCommand::CtxDetachResource(virtio_gpu_ctx_resource::default()); + assert_eq!(format!("{:?}", ctx_detach_resource), "CtxDetachResource"); + } + + #[test] + fn test_resource_create_3d_debug() { + let resource_create_3d = + GpuCommand::ResourceCreate3d(virtio_gpu_resource_create_3d::default()); + assert_eq!(format!("{:?}", resource_create_3d), "ResourceCreate3d"); + } + + #[test] + fn test_transfer_to_host_3d_debug() { + let transfer_to_host_3d = + GpuCommand::TransferToHost3d(virtio_gpu_transfer_host_3d::default()); + assert_eq!(format!("{:?}", transfer_to_host_3d), "TransferToHost3d"); + } + + #[test] + fn test_transfer_from_host_3d_debug() { + let transfer_from_host_3d = + 
GpuCommand::TransferFromHost3d(virtio_gpu_transfer_host_3d::default()); + assert_eq!(format!("{:?}", transfer_from_host_3d), "TransferFromHost3d"); + } + + #[test] + fn test_cmd_submit_3d_debug() { + let cmd_submit_3d = GpuCommand::CmdSubmit3d { + cmd_data: Vec::new(), + fence_ids: Vec::new(), + }; + assert_eq!(format!("{:?}", cmd_submit_3d), "CmdSubmit3d"); + } + + #[test] + fn test_resource_create_blob_debug() { + let resource_create_blob = + GpuCommand::ResourceCreateBlob(virtio_gpu_resource_create_blob::default()); + assert_eq!(format!("{:?}", resource_create_blob), "ResourceCreateBlob"); + } + + #[test] + fn test_resource_map_blob_debug() { + let resource_map_blob = + GpuCommand::ResourceMapBlob(virtio_gpu_resource_map_blob::default()); + assert_eq!(format!("{:?}", resource_map_blob), "ResourceMapBlob"); + } + + #[test] + fn test_resource_unmap_blob_debug() { + let resource_unmap_blob = + GpuCommand::ResourceUnmapBlob(virtio_gpu_resource_unmap_blob::default()); + assert_eq!(format!("{:?}", resource_unmap_blob), "ResourceUnmapBlob"); + } + + #[test] + fn test_update_cursor_debug() { + let update_cursor = GpuCommand::UpdateCursor(virtio_gpu_update_cursor::default()); + assert_eq!(format!("{:?}", update_cursor), "UpdateCursor"); + } + + #[test] + fn test_move_cursor_debug() { + let move_cursor = GpuCommand::MoveCursor(virtio_gpu_update_cursor::default()); + assert_eq!(format!("{:?}", move_cursor), "MoveCursor"); + } + + #[test] + fn test_resource_assign_uuid_debug() { + let resource_assign_uuid = + GpuCommand::ResourceAssignUuid(virtio_gpu_resource_assign_uuid::default()); + assert_eq!(format!("{:?}", resource_assign_uuid), "ResourceAssignUuid"); + } + + #[test] + fn test_virtio_gpu_ctx_create_debug() { + let bytes = b"test_debug\0"; + let original = virtio_gpu_ctx_create { + debug_name: { + let mut debug_name = [0; 64]; + debug_name[..bytes.len()].copy_from_slice(bytes); + debug_name + }, + context_init: 0, + nlen: bytes.len() as u32, + }; + + let debug_string = 
format!("{:?}", original); + assert_eq!( + debug_string, + "virtio_gpu_ctx_create { debug_name: \"test_debug\", context_init: 0, .. }" + ); + } + + #[test] + fn test_gpu_response_encode() { + let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 16384)]).unwrap(); + + let vq = MockSplitQueue::new(&mem, 8); + let desc_chain = vq + .build_desc_chain(&[Descriptor::new(0x1000, 8192, VRING_DESC_F_WRITE as u16, 0)]) + .unwrap(); + + let mut writer = desc_chain + .clone() + .writer(&mem) + .map_err(Error::CreateWriter) + .unwrap(); + + let resp = GpuResponse::OkNoData; + let resp_ok_nodata = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_ok_nodata, 24); + + let resp = GpuResponse::OkDisplayInfo(vec![(0, 0, false)]); + let resp_display_info = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_display_info, 408); + + let edid_data: Box<[u8]> = Box::new([0u8; 1024]); + let resp = GpuResponse::OkEdid { blob: edid_data }; + let resp_edid = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_edid, 1056); + + let resp = GpuResponse::OkCapset(vec![]); + let resp_capset = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_capset, 24); + + let resp = GpuResponse::OkCapsetInfo { + capset_id: 0, + version: 0, + size: 0, + }; + let resp_capset = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_capset, 40); + + let resp = GpuResponse::OkResourcePlaneInfo { + format_modifier: 0, + plane_info: vec![], + }; + let resp_resource_planeinfo = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_resource_planeinfo, 72); + + let resp = GpuResponse::OkResourceUuid { uuid: [0u8; 16] }; + let resp_resource_uuid = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_resource_uuid, 40); + + let resp = GpuResponse::OkMapInfo { map_info: 0 }; + let resp_map_info = GpuResponse::encode(&resp, 
0, 0, 0, 0, &mut writer).unwrap();
+        assert_eq!(resp_map_info, 32);
+    }
+}
diff --git a/staging/vhost-device-gpu/src/virtio_gpu.rs b/staging/vhost-device-gpu/src/virtio_gpu.rs
new file mode 100644
index 000000000..91c3b28f1
--- /dev/null
+++ b/staging/vhost-device-gpu/src/virtio_gpu.rs
@@ -0,0 +1,1011 @@
+// Copyright 2024 Red Hat Inc
+// Copyright 2019 The ChromiumOS Authors
+//
+// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause
+
+use std::{
+    collections::BTreeMap,
+    io::IoSliceMut,
+    os::fd::FromRawFd,
+    result::Result,
+    sync::{Arc, Mutex},
+};
+
+use libc::c_void;
+use log::{debug, error, trace, warn};
+use rutabaga_gfx::{
+    ResourceCreate3D, ResourceCreateBlob, Rutabaga, RutabagaBuilder, RutabagaComponentType,
+    RutabagaFence, RutabagaFenceHandler, RutabagaIntoRawDescriptor, RutabagaIovec, Transfer3D,
+};
+use vhost::vhost_user::{
+    gpu_message::{
+        VhostUserGpuCursorPos, VhostUserGpuCursorUpdate, VhostUserGpuEdidRequest,
+        VhostUserGpuScanout, VhostUserGpuUpdate,
+    },
+    GpuBackend,
+};
+use vhost_user_backend::{VringRwLock, VringT};
+use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, VolatileSlice};
+use vmm_sys_util::eventfd::EventFd;
+
+use crate::{
+    device::Error,
+    protocol::{
+        virtio_gpu_rect, GpuResponse,
+        GpuResponse::{
+            ErrInvalidParameter, ErrInvalidResourceId, ErrInvalidScanoutId, ErrUnspec, OkCapset,
+            OkCapsetInfo, OkDisplayInfo, OkEdid, OkNoData, OkResourcePlaneInfo,
+        },
+        GpuResponsePlaneInfo, VirtioGpuResult, VIRTIO_GPU_FLAG_INFO_RING_IDX,
+        VIRTIO_GPU_MAX_SCANOUTS,
+    },
+    GpuMode,
+};
+
+fn sglist_to_rutabaga_iovecs(
+    vecs: &[(GuestAddress, usize)],
+    mem: &GuestMemoryMmap,
+) -> Result<Vec<RutabagaIovec>, ()> {
+    if vecs
+        .iter()
+        .any(|&(addr, len)| mem.get_slice(addr, len).is_err())
+    {
+        return Err(());
+    }
+
+    let mut rutabaga_iovecs: Vec<RutabagaIovec> = Vec::new();
+    for &(addr, len) in vecs {
+        let slice = mem.get_slice(addr, len).unwrap();
+        rutabaga_iovecs.push(RutabagaIovec {
+            base: slice.ptr_guard_mut().as_ptr().cast::<c_void>(),
+            len,
+        });
+    }
+
+    Ok(rutabaga_iovecs)
+}
+
+#[derive(Default, Debug)]
+pub struct Rectangle {
+    pub x: u32,
+    pub y: u32,
+    pub width: u32,
+    pub height: u32,
+}
+
+impl From<virtio_gpu_rect> for Rectangle {
+    fn from(r: virtio_gpu_rect) -> Self {
+        Self {
+            x: r.x,
+            y: r.y,
+            width: r.width,
+            height: r.height,
+        }
+    }
+}
+
+#[cfg_attr(test, mockall::automock)]
+// We need to specify some lifetimes explicitly, for mockall::automock attribute to compile
+#[allow(clippy::needless_lifetimes)]
+pub trait VirtioGpu {
+    /// Uses the hypervisor to unmap the blob resource.
+    fn resource_unmap_blob(&mut self, resource_id: u32) -> VirtioGpuResult;
+
+    /// Uses the hypervisor to map the rutabaga blob resource.
+    ///
+    /// When sandboxing is disabled, `external_blob` is unset and opaque fds are
+    /// mapped by rutabaga as `ExternalMapping`.
+    /// When sandboxing is enabled, `external_blob` is set and opaque fds must
+    /// be mapped in the hypervisor process by Vulkano using metadata
+    /// provided by `Rutabaga::vulkan_info()`.
+    fn resource_map_blob(&mut self, resource_id: u32, offset: u64) -> VirtioGpuResult;
+
+    /// Creates a blob resource using rutabaga.
+    fn resource_create_blob(
+        &mut self,
+        ctx_id: u32,
+        resource_id: u32,
+        resource_create_blob: ResourceCreateBlob,
+        vecs: Vec<(GuestAddress, usize)>,
+        mem: &GuestMemoryMmap,
+    ) -> VirtioGpuResult;
+
+    fn process_fence(
+        &mut self,
+        ring: VirtioGpuRing,
+        fence_id: u64,
+        desc_index: u16,
+        len: u32,
+    ) -> bool;
+
+    /// Creates a fence with the `RutabagaFence` that can be used to determine
+    /// when the previous command completed.
+    fn create_fence(&mut self, rutabaga_fence: RutabagaFence) -> VirtioGpuResult;
+
+    /// Submits a command buffer to a rutabaga context.
+    fn submit_command(
+        &mut self,
+        ctx_id: u32,
+        commands: &mut [u8],
+        fence_ids: &[u64],
+    ) -> VirtioGpuResult;
+
+    /// Detaches a resource from a rutabaga context.
+ fn context_detach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult; + + /// Attaches a resource to a rutabaga context. + fn context_attach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult; + + /// Destroys a rutabaga context. + fn destroy_context(&mut self, ctx_id: u32) -> VirtioGpuResult; + fn force_ctx_0(&self); + + /// Gets the list of supported display resolutions + fn display_info(&self) -> VirtioGpuResult; + + /// Gets the EDID for the specified scanout ID. If that scanout is not + /// enabled, it would return the EDID of a default display. + fn get_edid(&self, edid_req: VhostUserGpuEdidRequest) -> VirtioGpuResult; + + /// Sets the given resource id as the source of scanout to the display. + fn set_scanout( + &mut self, + scanout_id: u32, + resource_id: u32, + rect: Rectangle, + ) -> VirtioGpuResult; + + /// Creates a 3D resource with the given properties and `resource_id`. + fn resource_create_3d( + &mut self, + resource_id: u32, + resource_create_3d: ResourceCreate3D, + ) -> VirtioGpuResult; + + /// Releases guest kernel reference on the resource. + fn unref_resource(&mut self, resource_id: u32) -> VirtioGpuResult; + + /// If the resource is the scanout resource, flush it to the display. + fn flush_resource(&mut self, resource_id: u32, rect: Rectangle) -> VirtioGpuResult; + + /// Copies data to host resource from the attached iovecs. Can also be used + /// to flush caches. + fn transfer_write( + &mut self, + ctx_id: u32, + resource_id: u32, + transfer: Transfer3D, + ) -> VirtioGpuResult; + + /// Copies data from the host resource to: + /// 1) To the optional volatile slice + /// 2) To the host resource's attached iovecs + /// + /// Can also be used to invalidate caches. 
+    fn transfer_read<'a>(
+        &mut self,
+        ctx_id: u32,
+        resource_id: u32,
+        transfer: Transfer3D,
+        buf: Option<VolatileSlice<'a>>,
+    ) -> VirtioGpuResult;
+
+    /// Attaches backing memory to the given resource, represented by a `Vec` of
+    /// `(address, size)` tuples in the guest's physical address space.
+    /// Converts to `RutabagaIovec` from the memory mapping.
+    fn attach_backing(
+        &mut self,
+        resource_id: u32,
+        mem: &GuestMemoryMmap,
+        vecs: Vec<(GuestAddress, usize)>,
+    ) -> VirtioGpuResult;
+
+    /// Detaches any previously attached iovecs from the resource.
+    fn detach_backing(&mut self, resource_id: u32) -> VirtioGpuResult;
+
+    /// Updates the cursor's memory to the given `resource_id`, and sets its
+    /// position to the given coordinates.
+    fn update_cursor(
+        &mut self,
+        resource_id: u32,
+        cursor_pos: VhostUserGpuCursorPos,
+        hot_x: u32,
+        hot_y: u32,
+    ) -> VirtioGpuResult;
+
+    /// Moves the cursor's position to the given coordinates.
+    fn move_cursor(&mut self, resource_id: u32, cursor: VhostUserGpuCursorPos) -> VirtioGpuResult;
+
+    /// Returns a uuid for the resource.
+    fn resource_assign_uuid(&self, resource_id: u32) -> VirtioGpuResult;
+
+    /// Gets rutabaga's capset information associated with `index`.
+    fn get_capset_info(&self, index: u32) -> VirtioGpuResult;
+
+    /// Gets a capset from rutabaga.
+    fn get_capset(&self, capset_id: u32, version: u32) -> VirtioGpuResult;
+
+    /// Creates a rutabaga context.
+    fn create_context<'a>(
+        &mut self,
+        ctx_id: u32,
+        context_init: u32,
+        context_name: Option<&'a str>,
+    ) -> VirtioGpuResult;
+
+    /// Get an `EventFd` descriptor, that signals when to call `event_poll`.
+    fn get_event_poll_fd(&self) -> Option<EventFd>;
+
+    /// Polls the Rutabaga backend.
+    fn event_poll(&self);
+}
+
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub enum VirtioGpuRing {
+    Global,
+    ContextSpecific { ctx_id: u32, ring_idx: u8 },
+}
+
+struct FenceDescriptor {
+    ring: VirtioGpuRing,
+    fence_id: u64,
+    desc_index: u16,
+    len: u32,
+}
+
+#[derive(Default)]
+pub struct FenceState {
+    descs: Vec<FenceDescriptor>,
+    completed_fences: BTreeMap<VirtioGpuRing, u64>,
+}
+
+#[derive(Copy, Clone, Debug, Default)]
+struct AssociatedScanouts(u32);
+
+impl AssociatedScanouts {
+    fn enable(&mut self, scanout_id: u32) {
+        self.0 |= 1 << scanout_id;
+    }
+
+    fn disable(&mut self, scanout_id: u32) {
+        self.0 ^= 1 << scanout_id;
+    }
+
+    const fn has_any_enabled(self) -> bool {
+        self.0 != 0
+    }
+
+    fn iter_enabled(self) -> impl Iterator<Item = u32> {
+        (0..VIRTIO_GPU_MAX_SCANOUTS).filter(move |i| ((self.0 >> i) & 1) == 1)
+    }
+}
+
+#[derive(Default, Copy, Clone)]
+pub struct VirtioGpuResource {
+    id: u32,
+    width: u32,
+    height: u32,
+    /// Stores information about which scanouts are associated with the given
+    /// resource. Resource could be used for multiple scanouts (the displays
+    /// are mirrored).
+    scanouts: AssociatedScanouts,
+}
+
+impl VirtioGpuResource {
+    fn calculate_size(&self) -> Result<usize, &'static str> {
+        let width = self.width as usize;
+        let height = self.height as usize;
+        let size = width
+            .checked_mul(height)
+            .ok_or("Multiplication of width and height overflowed")?
+            .checked_mul(READ_RESOURCE_BYTES_PER_PIXEL as usize)
+            .ok_or("Multiplication of result and bytes_per_pixel overflowed")?;
+
+        Ok(size)
+    }
+}
+
+impl VirtioGpuResource {
+    /// Creates a new `VirtioGpuResource` with 2D/3D metadata
+    #[must_use]
+    pub fn new(resource_id: u32, width: u32, height: u32) -> Self {
+        Self {
+            id: resource_id,
+            width,
+            height,
+            scanouts: AssociatedScanouts::default(),
+        }
+    }
+}
+
+pub struct VirtioGpuScanout {
+    resource_id: u32,
+}
+
+pub struct RutabagaVirtioGpu {
+    pub(crate) rutabaga: Rutabaga,
+    pub(crate) gpu_backend: GpuBackend,
+    pub(crate) resources: BTreeMap<u32, VirtioGpuResource>,
+    pub(crate) fence_state: Arc<Mutex<FenceState>>,
+    pub(crate) scanouts: [Option<VirtioGpuScanout>; VIRTIO_GPU_MAX_SCANOUTS as usize],
+}
+
+const READ_RESOURCE_BYTES_PER_PIXEL: u32 = 4;
+
+impl RutabagaVirtioGpu {
+    // TODO: this depends on Rutabaga builder, so this will need to be handled at
+    // runtime eventually
+    pub const MAX_NUMBER_OF_CAPSETS: u32 = 3;
+
+    fn create_fence_handler(
+        queue_ctl: VringRwLock,
+        fence_state: Arc<Mutex<FenceState>>,
+    ) -> RutabagaFenceHandler {
+        RutabagaFenceHandler::new(move |completed_fence: RutabagaFence| {
+            debug!(
+                "XXX - fence called: id={}, ring_idx={}",
+                completed_fence.fence_id, completed_fence.ring_idx
+            );
+
+            let mut fence_state = fence_state.lock().unwrap();
+            let mut i = 0;
+
+            let ring = match completed_fence.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX {
+                0 => VirtioGpuRing::Global,
+                _ => VirtioGpuRing::ContextSpecific {
+                    ctx_id: completed_fence.ctx_id,
+                    ring_idx: completed_fence.ring_idx,
+                },
+            };
+
+            while i < fence_state.descs.len() {
+                debug!("XXX - fence_id: {}", fence_state.descs[i].fence_id);
+                if fence_state.descs[i].ring == ring
+                    && fence_state.descs[i].fence_id <= completed_fence.fence_id
+                {
+                    let completed_desc = fence_state.descs.remove(i);
+                    debug!(
+                        "XXX - found fence: desc_index={}",
+                        completed_desc.desc_index
+                    );
+
+                    queue_ctl
+                        .add_used(completed_desc.desc_index, completed_desc.len)
+                        .unwrap();
+
+                    queue_ctl
+                        .signal_used_queue()
+                        .map_err(Error::NotificationFailed)
+                        .unwrap();
+                    debug!("Notification sent");
+                } else {
+                    i += 1;
+                }
+            }
+            // Update the last completed fence for this context
+            fence_state
+                .completed_fences
+                .insert(ring, completed_fence.fence_id);
+        })
+    }
+
+    fn configure_rutabaga_builder(gpu_mode: GpuMode) -> RutabagaBuilder {
+        let component = match gpu_mode {
+            GpuMode::VirglRenderer => RutabagaComponentType::VirglRenderer,
+            GpuMode::Gfxstream => RutabagaComponentType::Gfxstream,
+        };
+        RutabagaBuilder::new(component, 0)
+            .set_use_egl(true)
+            .set_use_gles(true)
+            .set_use_glx(true)
+            .set_use_surfaceless(true)
+            .set_use_external_blob(true)
+    }
+
+    #[must_use]
+    pub fn new(queue_ctl: &VringRwLock, gpu_mode: GpuMode, gpu_backend: GpuBackend) -> Self {
+        let fence_state = Arc::new(Mutex::new(FenceState::default()));
+        let fence = Self::create_fence_handler(queue_ctl.clone(), fence_state.clone());
+        let rutabaga = Self::configure_rutabaga_builder(gpu_mode)
+            .build(fence, None)
+            .expect("Rutabaga initialization failed!");
+
+        Self {
+            rutabaga,
+            gpu_backend,
+            resources: BTreeMap::default(),
+            fence_state,
+            scanouts: Default::default(),
+        }
+    }
+
+    fn result_from_query(&self, resource_id: u32) -> GpuResponse {
+        let Ok(query) = self.rutabaga.query(resource_id) else {
+            return OkNoData;
+        };
+        let mut plane_info = Vec::with_capacity(4);
+        for plane_index in 0..4 {
+            plane_info.push(GpuResponsePlaneInfo {
+                stride: query.strides[plane_index],
+                offset: query.offsets[plane_index],
+            });
+        }
+        let format_modifier = query.modifier;
+        OkResourcePlaneInfo {
+            format_modifier,
+            plane_info,
+        }
+    }
+
+    fn read_2d_resource(
+        &mut self,
+        resource: VirtioGpuResource,
+        output: &mut [u8],
+    ) -> Result<(), String> {
+        let minimal_buffer_size = resource.calculate_size()?;
+        assert!(output.len() >= minimal_buffer_size);
+
+        let transfer = Transfer3D {
+            x: 0,
+            y: 0,
+            z: 0,
+            w: resource.width,
+            h: resource.height,
+            d: 1,
+            level: 0,
+            stride: resource.width * READ_RESOURCE_BYTES_PER_PIXEL,
+            layer_stride: 0,
+            offset: 0,
+        };
+
+        // ctx_id 0 seems to be special, crosvm uses it for this purpose too
+        self.rutabaga
+            .transfer_read(0, resource.id, transfer, Some(IoSliceMut::new(output)))
+            .map_err(|e| format!("{e}"))?;
+
+        Ok(())
+    }
+}
+
+impl VirtioGpu for RutabagaVirtioGpu {
+    fn force_ctx_0(&self) {
+        self.rutabaga.force_ctx_0();
+    }
+
+    fn display_info(&self) -> VirtioGpuResult {
+        let backend_display_info = self.gpu_backend.get_display_info().map_err(|e| {
+            error!("Failed to get display info: {e:?}");
+            ErrUnspec
+        })?;
+
+        let display_info = backend_display_info
+            .pmodes
+            .iter()
+            .map(|display| (display.r.width, display.r.height, display.enabled == 1))
+            .collect::<Vec<_>>();
+
+        debug!("Displays: {:?}", display_info);
+        Ok(OkDisplayInfo(display_info))
+    }
+
+    fn get_edid(&self, edid_req: VhostUserGpuEdidRequest) -> VirtioGpuResult {
+        debug!("edid request: {edid_req:?}");
+        let edid = self.gpu_backend.get_edid(&edid_req).map_err(|e| {
+            error!("Failed to get edid from frontend: {}", e);
+            ErrUnspec
+        })?;
+
+        Ok(OkEdid {
+            blob: Box::from(&edid.edid[..edid.size as usize]),
+        })
+    }
+
+    fn set_scanout(
+        &mut self,
+        scanout_id: u32,
+        resource_id: u32,
+        rect: Rectangle,
+    ) -> VirtioGpuResult {
+        let scanout = self
+            .scanouts
+            .get_mut(scanout_id as usize)
+            .ok_or(ErrInvalidScanoutId)?;
+
+        // If a resource is already associated with this scanout, make sure to disable
+        // this scanout for that resource
+        if let Some(resource_id) = scanout.as_ref().map(|scanout| scanout.resource_id) {
+            let resource = self
+                .resources
+                .get_mut(&resource_id)
+                .ok_or(ErrInvalidResourceId)?;
+
+            resource.scanouts.disable(scanout_id);
+        }
+
+        // Virtio spec: "The driver can use resource_id = 0 to disable a scanout."
+ if resource_id == 0 { + *scanout = None; + debug!("Disabling scanout scanout_id={scanout_id}"); + self.gpu_backend + .set_scanout(&VhostUserGpuScanout { + scanout_id, + width: 0, + height: 0, + }) + .map_err(|e| { + error!("Failed to set_scanout: {e:?}"); + ErrUnspec + })?; + return Ok(OkNoData); + } + + debug!("Enabling scanout scanout_id={scanout_id}, resource_id={resource_id}: {rect:?}"); + + // QEMU doesn't like (it lags) when we call set_scanout while the scanout is + // enabled + if scanout.is_none() { + self.gpu_backend + .set_scanout(&VhostUserGpuScanout { + scanout_id, + width: rect.width, + height: rect.height, + }) + .map_err(|e| { + error!("Failed to set_scanout: {e:?}"); + ErrUnspec + })?; + } + + let resource = self + .resources + .get_mut(&resource_id) + .ok_or(ErrInvalidResourceId)?; + + resource.scanouts.enable(scanout_id); + *scanout = Some(VirtioGpuScanout { resource_id }); + Ok(OkNoData) + } + + fn resource_create_3d( + &mut self, + resource_id: u32, + resource_create_3d: ResourceCreate3D, + ) -> VirtioGpuResult { + self.rutabaga + .resource_create_3d(resource_id, resource_create_3d)?; + + let resource = VirtioGpuResource::new( + resource_id, + resource_create_3d.width, + resource_create_3d.height, + ); + + debug_assert!( + !self.resources.contains_key(&resource_id), + "Resource ID {resource_id} already exists in the resources map." + ); + + // Rely on rutabaga to check for duplicate resource ids. + self.resources.insert(resource_id, resource); + Ok(self.result_from_query(resource_id)) + } + + fn unref_resource(&mut self, resource_id: u32) -> VirtioGpuResult { + let resource = self.resources.remove(&resource_id); + match resource { + None => return Err(ErrInvalidResourceId), + // The spec doesn't say anything about this situation and this doesn't actually seem + // to happen in practise but let's be careful and refuse to disable the resource. + // This keeps the internal state of the gpu device and the fronted consistent. 
+ Some(resource) if resource.scanouts.has_any_enabled() => { + warn!( + "The driver requested unref_resource, but resource {resource_id} has \ + associated scanouts, refusing to delete the resource." + ); + return Err(ErrUnspec); + } + _ => (), + } + self.rutabaga.unref_resource(resource_id)?; + Ok(OkNoData) + } + + /// If the resource is the scanout resource, flush it to the display. + fn flush_resource(&mut self, resource_id: u32, _rect: Rectangle) -> VirtioGpuResult { + if resource_id == 0 { + return Ok(OkNoData); + } + + let resource = *self + .resources + .get(&resource_id) + .ok_or(ErrInvalidResourceId)?; + + for scanout_id in resource.scanouts.iter_enabled() { + let resource_size = resource.calculate_size().map_err(|e| { + error!( + "Resource {id} size calculation failed: {e}", + id = resource.id + ); + ErrUnspec + })?; + + let mut data = vec![0; resource_size]; + + // Gfxstream doesn't support transfer_read for portion of the resource. So we + // always read the whole resource, even if the guest specified to + // flush only a portion of it. + // + // The function stream_renderer_transfer_read_iov seems to ignore the stride and + // transfer_box parameters and expects the provided buffer to fit the whole + // resource. 
+ if let Err(e) = self.read_2d_resource(resource, &mut data) { + log::error!("Failed to read resource {resource_id} for scanout {scanout_id}: {e}"); + continue; + } + + self.gpu_backend + .update_scanout( + &VhostUserGpuUpdate { + scanout_id, + x: 0, + y: 0, + width: resource.width, + height: resource.height, + }, + &data, + ) + .map_err(|e| { + error!("Failed to update_scanout: {e:?}"); + ErrUnspec + })?; + } + + Ok(OkNoData) + } + + fn transfer_write( + &mut self, + ctx_id: u32, + resource_id: u32, + transfer: Transfer3D, + ) -> VirtioGpuResult { + trace!("transfer_write ctx_id {ctx_id}, resource_id {resource_id}, {transfer:?}"); + + self.rutabaga + .transfer_write(ctx_id, resource_id, transfer)?; + Ok(OkNoData) + } + + fn transfer_read( + &mut self, + ctx_id: u32, + resource_id: u32, + transfer: Transfer3D, + buf: Option, + ) -> VirtioGpuResult { + let buf = buf.map(|vs| { + IoSliceMut::new( + // SAFETY: trivially safe + unsafe { std::slice::from_raw_parts_mut(vs.ptr_guard_mut().as_ptr(), vs.len()) }, + ) + }); + self.rutabaga + .transfer_read(ctx_id, resource_id, transfer, buf)?; + Ok(OkNoData) + } + + fn attach_backing( + &mut self, + resource_id: u32, + mem: &GuestMemoryMmap, + vecs: Vec<(GuestAddress, usize)>, + ) -> VirtioGpuResult { + let rutabaga_iovecs = sglist_to_rutabaga_iovecs(&vecs[..], mem).map_err(|()| ErrUnspec)?; + self.rutabaga.attach_backing(resource_id, rutabaga_iovecs)?; + Ok(OkNoData) + } + + fn detach_backing(&mut self, resource_id: u32) -> VirtioGpuResult { + self.rutabaga.detach_backing(resource_id)?; + Ok(OkNoData) + } + + fn update_cursor( + &mut self, + resource_id: u32, + cursor_pos: VhostUserGpuCursorPos, + hot_x: u32, + hot_y: u32, + ) -> VirtioGpuResult { + const CURSOR_WIDTH: u32 = 64; + const CURSOR_HEIGHT: u32 = 64; + + let mut data = Box::new( + [0; READ_RESOURCE_BYTES_PER_PIXEL as usize + * CURSOR_WIDTH as usize + * CURSOR_HEIGHT as usize], + ); + + let cursor_resource = self + .resources + .get(&resource_id) + 
.ok_or(ErrInvalidResourceId)?; + + if cursor_resource.width != CURSOR_WIDTH || cursor_resource.height != CURSOR_HEIGHT { + error!("Cursor resource has invalid dimensions"); + return Err(ErrInvalidParameter); + } + + self.read_2d_resource(*cursor_resource, &mut data[..]) + .map_err(|e| { + error!("Failed to read resource of cursor: {e}"); + ErrUnspec + })?; + + let cursor_update = VhostUserGpuCursorUpdate { + pos: cursor_pos, + hot_x, + hot_y, + }; + + self.gpu_backend + .cursor_update(&cursor_update, &data) + .map_err(|e| { + error!("Failed to update cursor pos from frontend: {}", e); + ErrUnspec + })?; + + Ok(OkNoData) + } + + fn move_cursor(&mut self, resource_id: u32, cursor: VhostUserGpuCursorPos) -> VirtioGpuResult { + if resource_id == 0 { + self.gpu_backend.cursor_pos_hide(&cursor).map_err(|e| { + error!("Failed to set cursor pos from frontend: {}", e); + ErrUnspec + })?; + } else { + self.gpu_backend.cursor_pos(&cursor).map_err(|e| { + error!("Failed to set cursor pos from frontend: {}", e); + ErrUnspec + })?; + } + + Ok(OkNoData) + } + + fn resource_assign_uuid(&self, _resource_id: u32) -> VirtioGpuResult { + error!("Not implemented: resource_assign_uuid"); + Err(ErrUnspec) + } + + fn get_capset_info(&self, index: u32) -> VirtioGpuResult { + let (capset_id, version, size) = self.rutabaga.get_capset_info(index)?; + Ok(OkCapsetInfo { + capset_id, + version, + size, + }) + } + + fn get_capset(&self, capset_id: u32, version: u32) -> VirtioGpuResult { + let capset = self.rutabaga.get_capset(capset_id, version)?; + Ok(OkCapset(capset)) + } + + fn create_context( + &mut self, + ctx_id: u32, + context_init: u32, + context_name: Option<&str>, + ) -> VirtioGpuResult { + self.rutabaga + .create_context(ctx_id, context_init, context_name)?; + Ok(OkNoData) + } + + fn destroy_context(&mut self, ctx_id: u32) -> VirtioGpuResult { + self.rutabaga.destroy_context(ctx_id)?; + Ok(OkNoData) + } + + fn context_attach_resource(&mut self, ctx_id: u32, resource_id: u32) -> 
VirtioGpuResult { + self.rutabaga.context_attach_resource(ctx_id, resource_id)?; + Ok(OkNoData) + } + + fn context_detach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult { + self.rutabaga.context_detach_resource(ctx_id, resource_id)?; + Ok(OkNoData) + } + + fn submit_command( + &mut self, + ctx_id: u32, + commands: &mut [u8], + fence_ids: &[u64], + ) -> VirtioGpuResult { + self.rutabaga.submit_command(ctx_id, commands, fence_ids)?; + Ok(OkNoData) + } + + fn create_fence(&mut self, rutabaga_fence: RutabagaFence) -> VirtioGpuResult { + self.rutabaga.create_fence(rutabaga_fence)?; + Ok(OkNoData) + } + + fn process_fence( + &mut self, + ring: VirtioGpuRing, + fence_id: u64, + desc_index: u16, + len: u32, + ) -> bool { + // In case the fence is signaled immediately after creation, don't add a return + // FenceDescriptor. + let mut fence_state = self.fence_state.lock().unwrap(); + if fence_id > *fence_state.completed_fences.get(&ring).unwrap_or(&0) { + fence_state.descs.push(FenceDescriptor { + ring, + fence_id, + desc_index, + len, + }); + + false + } else { + true + } + } + + fn resource_create_blob( + &mut self, + _ctx_id: u32, + _resource_id: u32, + _resource_create_blob: ResourceCreateBlob, + _vecs: Vec<(GuestAddress, usize)>, + _mem: &GuestMemoryMmap, + ) -> VirtioGpuResult { + error!("Not implemented: resource_create_blob"); + Err(ErrUnspec) + } + + fn resource_map_blob(&mut self, _resource_id: u32, _offset: u64) -> VirtioGpuResult { + error!("Not implemented: resource_map_blob"); + Err(ErrUnspec) + } + + fn resource_unmap_blob(&mut self, _resource_id: u32) -> VirtioGpuResult { + error!("Not implemented: resource_unmap_blob"); + Err(ErrUnspec) + } + + fn get_event_poll_fd(&self) -> Option { + self.rutabaga.poll_descriptor().map(|fd| { + // SAFETY: Safe, the fd should be valid, because Rutabaga guarantees it. + // into_raw_descriptor() returns a RawFd and makes sure SafeDescriptor::drop + // doesn't run. 
+ unsafe { EventFd::from_raw_fd(fd.into_raw_descriptor()) } + }) + } + + fn event_poll(&self) { + self.rutabaga.event_poll(); + } +} + +#[cfg(test)] +mod tests { + use std::{ + os::unix::net::UnixStream, + sync::{Arc, Mutex}, + }; + + use assert_matches::assert_matches; + use rusty_fork::rusty_fork_test; + use rutabaga_gfx::{ + RutabagaHandler, RUTABAGA_PIPE_BIND_RENDER_TARGET, RUTABAGA_PIPE_TEXTURE_2D, + }; + + use super::*; + use crate::protocol::VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM; + + const CREATE_RESOURCE_2D_720P: ResourceCreate3D = ResourceCreate3D { + target: RUTABAGA_PIPE_TEXTURE_2D, + format: VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, + bind: RUTABAGA_PIPE_BIND_RENDER_TARGET, + width: 1280, + height: 720, + depth: 1, + array_size: 1, + last_level: 0, + nr_samples: 0, + flags: 0, + }; + + fn dummy_gpu_backend() -> GpuBackend { + let (_, backend) = UnixStream::pair().unwrap(); + GpuBackend::from_stream(backend) + } + + fn new_gpu() -> RutabagaVirtioGpu { + let builder = RutabagaVirtioGpu::configure_rutabaga_builder(GpuMode::VirglRenderer); + let rutabaga = builder.build(RutabagaHandler::new(|_| {}), None).unwrap(); + RutabagaVirtioGpu { + rutabaga, + gpu_backend: dummy_gpu_backend(), + resources: Default::default(), + fence_state: Arc::new(Mutex::new(Default::default())), + scanouts: Default::default(), + } + } + + rusty_fork_test! 
{ + #[test] + fn test_update_cursor_fails() { + let mut virtio_gpu = new_gpu(); + + let cursor_pos = VhostUserGpuCursorPos { + scanout_id: 1, + x: 123, + y: 123, + }; + + // The resource doesn't exist + let result = virtio_gpu.update_cursor(1, cursor_pos, 0, 0); + assert_matches!(result, Err(ErrInvalidResourceId)); + + // Create a resource + virtio_gpu.resource_create_3d(1, CREATE_RESOURCE_2D_720P).unwrap(); + + // The resource exists, but the dimensions are wrong + let result = virtio_gpu.update_cursor(1, cursor_pos, 0, 0); + assert_matches!(result, Err(ErrInvalidParameter)); + } + + #[test] + fn test_create_and_unref_resources() { + let mut virtio_gpu = new_gpu(); + + // No resources exists, cannot unref anything: + assert!(virtio_gpu.resources.is_empty()); + let result = virtio_gpu.unref_resource(0); + assert_matches!(result, Err(_)); + + // Create a resource + let result = virtio_gpu.resource_create_3d(1, CREATE_RESOURCE_2D_720P); + assert_matches!(result, Ok(_)); + assert_eq!(virtio_gpu.resources.len(), 1); + + // Unref the created resource + let result = virtio_gpu.unref_resource(1); + assert_matches!(result, Ok(_)); + assert!(virtio_gpu.resources.is_empty()); + } + + #[test] + fn test_gpu_capset() { + let virtio_gpu = new_gpu(); + + let capset_info = virtio_gpu.get_capset_info(0); + assert_matches!(capset_info, Ok(OkCapsetInfo { .. })); + + let Ok(OkCapsetInfo {capset_id, version, ..}) = capset_info else { + unreachable!("Response should have been checked by assert") + }; + + let capset_info = virtio_gpu.get_capset(capset_id, version); + assert_matches!(capset_info, Ok(OkCapset(_))); + } + + #[test] + fn test_gpu_submit_command_fails() { + let mut virtio_gpu = new_gpu(); + let mut cmd_buf = [0; 10]; + let fence_ids: Vec = Vec::with_capacity(0); + virtio_gpu + .submit_command(1, &mut cmd_buf[..], &fence_ids) + .unwrap_err(); + } + } +}