diff --git a/staging/Cargo.lock b/staging/Cargo.lock index 50c77e00..a2d01814 100644 --- a/staging/Cargo.lock +++ b/staging/Cargo.lock @@ -114,11 +114,17 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "clap" -version = "4.5.19" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615" +checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" dependencies = [ "clap_builder", "clap_derive", @@ -126,9 +132,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.19" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b" +checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstream", "anstyle", @@ -198,6 +204,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + [[package]] name = "either" version = "1.13.0" @@ -298,6 +310,18 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "fragile" +version = "2.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" + [[package]] name = "futures" version = "0.3.31" @@ -532,6 +556,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] + [[package]] name = "mio" version = "0.8.11" @@ -544,6 +577,32 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "mockall" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a" +dependencies = [ + "cfg-if", + "downcast", + "fragile", + "mockall_derive", + "predicates", + "predicates-tree", +] + +[[package]] +name = "mockall_derive" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "nb" version = "1.1.0" @@ -584,7 +643,7 @@ dependencies = [ "bitflags 1.3.2", "cfg-if", "libc", - "memoffset", + "memoffset 0.7.1", "pin-utils", ] @@ -599,6 +658,19 @@ dependencies = [ "libc", ] +[[package]] +name = "nix" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" +dependencies = [ + "bitflags 2.6.0", + "cfg-if", + "cfg_aliases", + "libc", + "memoffset 0.9.1", +] + [[package]] name = "num_cpus" version = "1.16.0" @@ -671,13 +743,45 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkg-config" +version = "0.3.31" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" + [[package]] name = "ppv-lite86" version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ - "zerocopy", + "zerocopy 0.7.35", +] + +[[package]] +name = "predicates" +version = "3.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e9086cc7640c29a356d1a29fd134380bee9d8f79a17410aa76e7ad295f42c97" +dependencies = [ + "anstyle", + "predicates-core", +] + +[[package]] +name = "predicates-core" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae8177bee8e75d6846599c6b9ff679ed51e882816914eec639944d7c9aa11931" + +[[package]] +name = "predicates-tree" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41b740d195ed3166cd147c8047ec98db0e22ec019eb8eeb76d343b795304fb13" +dependencies = [ + "predicates-core", + "termtree", ] [[package]] @@ -691,9 +795,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "b3e4daa0dcf6feba26f985457cdf104d4b4256fc5a09547140f3631bb076b19a" dependencies = [ "unicode-ident", ] @@ -704,6 +808,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1475abae4f8ad4998590fe3acfe20104f0a5d48fc420c817cd2c09c3f56151f0" +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quote" version = "1.0.37" @@ -787,6 +897,17 @@ version = "1.9.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" +[[package]] +name = "remain" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46aef80f842736de545ada6ec65b81ee91504efd6853f4b96de7414c42ae7443" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "rstest" version = "0.23.0" @@ -839,6 +960,36 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + +[[package]] +name = "rutabaga_gfx" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6628c6391bc654170f64fe8bfb7e5da3bf97f7ed3174c52c7a7349c012adec9c" +dependencies = [ + "anyhow", + "cfg-if", + "libc", + "log", + "nix 0.28.0", + "pkg-config", + "remain", + "thiserror", + "winapi", + "zerocopy 0.7.35", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -995,6 +1146,12 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "termtree" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" + [[package]] name = "thiserror" version = "1.0.64" @@ -1094,7 +1251,19 @@ dependencies = [ "bitflags 2.6.0", "libc", "uuid", - "vm-memory", + "vm-memory 0.15.0", + "vmm-sys-util", +] + +[[package]] +name = "vhost" +version = "0.12.0" +source = "git+https://github.com/rust-vmm/vhost.git?branch=main#4f160320a86a27579a3a3373b590dde2c27959a6" +dependencies = [ + "bitflags 2.6.0", + "libc", + "uuid", + "vm-memory 0.15.0", "vmm-sys-util", ] @@ -1109,11 +1278,11 @@ dependencies = [ "queues", "socketcan", "thiserror", - "vhost", - "vhost-user-backend", + 
"vhost 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "vhost-user-backend 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", "virtio-bindings", "virtio-queue", - "vm-memory", + "vm-memory 0.15.0", "vmm-sys-util", ] @@ -1131,14 +1300,37 @@ dependencies = [ "nix 0.26.4", "queues", "thiserror", - "vhost", - "vhost-user-backend", + "vhost 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "vhost-user-backend 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", "virtio-bindings", "virtio-queue", - "vm-memory", + "vm-memory 0.15.0", "vmm-sys-util", ] +[[package]] +name = "vhost-device-gpu" +version = "0.1.0" +dependencies = [ + "assert_matches", + "clap", + "env_logger 0.11.5", + "libc", + "log", + "mockall", + "rusty-fork", + "rutabaga_gfx", + "thiserror", + "vhost 0.12.0 (git+https://github.com/rust-vmm/vhost.git?branch=main)", + "vhost-user-backend 0.16.0 (git+https://github.com/rust-vmm/vhost.git?branch=main)", + "virtio-bindings", + "virtio-queue", + "vm-memory 0.14.1", + "vm-memory 0.15.0", + "vmm-sys-util", + "zerocopy 0.6.6", +] + [[package]] name = "vhost-device-video" version = "0.1.0" @@ -1156,11 +1348,11 @@ dependencies = [ "tempfile", "thiserror", "v4l2r", - "vhost", - "vhost-user-backend", + "vhost 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "vhost-user-backend 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", "virtio-bindings", "virtio-queue", - "vm-memory", + "vm-memory 0.15.0", "vmm-sys-util", ] @@ -1172,10 +1364,24 @@ checksum = "73768c8584e0be5ed8feb063785910cabe3f1af6661a5953fd3247fa611ddfaf" dependencies = [ "libc", "log", - "vhost", + "vhost 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "virtio-bindings", + "virtio-queue", + "vm-memory 0.15.0", + "vmm-sys-util", +] + +[[package]] +name = "vhost-user-backend" +version = "0.16.0" +source = 
"git+https://github.com/rust-vmm/vhost.git?branch=main#4f160320a86a27579a3a3373b590dde2c27959a6" +dependencies = [ + "libc", + "log", + "vhost 0.12.0 (git+https://github.com/rust-vmm/vhost.git?branch=main)", "virtio-bindings", "virtio-queue", - "vm-memory", + "vm-memory 0.15.0", "vmm-sys-util", ] @@ -1193,10 +1399,24 @@ checksum = "ffb1761348d3b5e82131379b9373435b48dc8333100bff3f1cdf9cc541a0ad83" dependencies = [ "log", "virtio-bindings", - "vm-memory", + "vm-memory 0.15.0", "vmm-sys-util", ] +[[package]] +name = "vm-memory" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c3aba5064cc5f6f7740cddc8dae34d2d9a311cac69b60d942af7f3ab8fc49f4" +dependencies = [ + "arc-swap", + "bitflags 2.6.0", + "libc", + "thiserror", + "vmm-sys-util", + "winapi", +] + [[package]] name = "vm-memory" version = "0.15.0" @@ -1221,6 +1441,15 @@ dependencies = [ "libc", ] +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -1415,6 +1644,16 @@ dependencies = [ "memchr", ] +[[package]] +name = "zerocopy" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "854e949ac82d619ee9a14c66a1b674ac730422372ccb759ce0c39cabcf2bf8e6" +dependencies = [ + "byteorder", + "zerocopy-derive 0.6.6", +] + [[package]] name = "zerocopy" version = "0.7.35" @@ -1422,7 +1661,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", - "zerocopy-derive", + "zerocopy-derive 0.7.35", +] + +[[package]] +name = "zerocopy-derive" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"125139de3f6b9d625c39e2efdd73d41bdac468ccd556556440e322be0e1bbd91" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] diff --git a/staging/Cargo.toml b/staging/Cargo.toml index 88d41685..1a970b77 100644 --- a/staging/Cargo.toml +++ b/staging/Cargo.toml @@ -1,6 +1,7 @@ [workspace] resolver = "2" members = [ + "vhost-device-gpu", "vhost-device-video", "vhost-device-can", "vhost-device-console", diff --git a/staging/coverage_config_x86_64.json b/staging/coverage_config_x86_64.json index 211c1fdb..2a525cd4 100644 --- a/staging/coverage_config_x86_64.json +++ b/staging/coverage_config_x86_64.json @@ -1,5 +1,5 @@ { - "coverage_score": 71.04, + "coverage_score": 80.21, "exclude_path": "", "crate_features": "" } diff --git a/staging/vhost-device-gpu/CHANGELOG.md b/staging/vhost-device-gpu/CHANGELOG.md new file mode 100644 index 00000000..7dc867d9 --- /dev/null +++ b/staging/vhost-device-gpu/CHANGELOG.md @@ -0,0 +1,14 @@ +# Changelog +## [Unreleased] + +### Added + +### Changed + +### Fixed + +### Deprecated + +## [0.1.0] + +First release \ No newline at end of file diff --git a/staging/vhost-device-gpu/Cargo.toml b/staging/vhost-device-gpu/Cargo.toml new file mode 100644 index 00000000..11222680 --- /dev/null +++ b/staging/vhost-device-gpu/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "vhost-device-gpu" +version = "0.1.0" +authors = ["Dorinda Bassey ", "Matej Hrica "] +description = "A virtio-gpu device using the vhost-user protocol." 
+repository = "https://github.com/rust-vmm/vhost-device" +readme = "README.md" +keywords = ["gpu", "vhost", "virt", "backend"] +license = "Apache-2.0 OR BSD-3-Clause" +edition = "2021" +publish = false + +[features] +xen = ["vm-memory/xen", "vhost/xen", "vhost-user-backend/xen"] + +[dependencies] +clap = { version = "4.4", features = ["derive"] } +env_logger = "0.11.5" +libc = "0.2" +log = "0.4" +[target.'cfg(not(target_env = "musl"))'.dependencies] +rutabaga_gfx = { version = "0.1.4", features = ["gfxstream", "virgl_renderer"] } +thiserror = "1.0" +vhost = { git = "https://github.com/rust-vmm/vhost.git", package = "vhost", branch = "main", features = ["vhost-user-backend"] } +vhost-user-backend = { git = "https://github.com/rust-vmm/vhost.git", package = "vhost-user-backend", branch = "main", features = ["gpu-socket"] } +virtio-bindings = "0.2.2" +virtio-queue = "0.13.0" +vm-memory = "0.15.0" +vmm-sys-util = "0.12.1" +zerocopy = "0.6.3" + +[dev-dependencies] +assert_matches = "1.5" +virtio-queue = { version = "0.13", features = ["test-utils"] } +vm-memory = { version = "0.14.0", features = ["backend-mmap", "backend-atomic"] } +mockall = "0.13.0" +rusty-fork = "0.3.0" \ No newline at end of file diff --git a/staging/vhost-device-gpu/LICENSE-APACHE b/staging/vhost-device-gpu/LICENSE-APACHE new file mode 100644 index 00000000..1cd601d0 --- /dev/null +++ b/staging/vhost-device-gpu/LICENSE-APACHE @@ -0,0 +1 @@ +../../LICENSE-APACHE \ No newline at end of file diff --git a/staging/vhost-device-gpu/LICENSE-BSD-3-Clause b/staging/vhost-device-gpu/LICENSE-BSD-3-Clause new file mode 100644 index 00000000..a60f1af6 --- /dev/null +++ b/staging/vhost-device-gpu/LICENSE-BSD-3-Clause @@ -0,0 +1 @@ +../../LICENSE-BSD-3-Clause \ No newline at end of file diff --git a/staging/vhost-device-gpu/README.md b/staging/vhost-device-gpu/README.md new file mode 100644 index 00000000..0357de2c --- /dev/null +++ b/staging/vhost-device-gpu/README.md @@ -0,0 +1,67 @@ +# vhost-device-gpu - GPU 
emulation backend daemon + +## Synopsis +```shell + vhost-device-gpu --socket-path +``` + +## Description + A virtio-gpu device using the vhost-user protocol. + +## Options + +```text + -s, --socket-path + vhost-user Unix domain socket path + + -h, --help + Print help + + -V, --version + Print version +``` + +## Examples + +First start the daemon on the host machine: + +```shell +host# vhost-device-gpu --socket-path /tmp/gpu.socket +``` + +With QEMU, there are two device frontends you can use with this device. +You can either use `vhost-user-gpu-pci` or `vhost-user-vga`, which also +implements VGA, that allows you to see boot messages before the guest +initializes the GPU. You can also use different display outputs (for example +`gtk` or `dbus`). +By default, QEMU also adds another VGA output, use `-vga none` to make +sure it is disabled. + +1) Using `vhost-user-gpu-pci` Start QEMU with the following flags: + +```text +-chardev socket,id=vgpu,path=/tmp/gpu.socket \ +-device vhost-user-gpu-pci,chardev=vgpu,id=vgpu \ +-object memory-backend-memfd,share=on,id=mem0,size=4G, \ +-machine q35,memory-backend=mem0,accel=kvm \ +-display gtk,gl=on,show-cursor=on \ +-vga none +``` + +2) Using `vhost-user-vga` Start QEMU with the following flags: + +```text +-chardev socket,id=vgpu,path=/tmp/gpu.socket \ +-device vhost-user-vga,chardev=vgpu,id=vgpu \ +-object memory-backend-memfd,share=on,id=mem0,size=4G, \ +-machine q35,memory-backend=mem0,accel=kvm \ +-display gtk,gl=on,show-cursor=on \ +-vga none +``` + +## License + +This project is licensed under either of + +- [Apache License](http://www.apache.org/licenses/LICENSE-2.0), Version 2.0 +- [BSD-3-Clause License](https://opensource.org/licenses/BSD-3-Clause) diff --git a/staging/vhost-device-gpu/src/device.rs b/staging/vhost-device-gpu/src/device.rs new file mode 100644 index 00000000..75773d4f --- /dev/null +++ b/staging/vhost-device-gpu/src/device.rs @@ -0,0 +1,1628 @@ +// vhost device Gpu +// +// Copyright 2024 RedHat +// 
+// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +use crate::{ + protocol::{ + virtio_gpu_box, virtio_gpu_ctrl_hdr, virtio_gpu_ctx_create, virtio_gpu_ctx_resource, + virtio_gpu_cursor_pos, virtio_gpu_get_capset, virtio_gpu_get_capset_info, + virtio_gpu_get_edid, virtio_gpu_rect, virtio_gpu_resource_attach_backing, + virtio_gpu_resource_create_2d, virtio_gpu_resource_create_3d, + virtio_gpu_resource_detach_backing, virtio_gpu_resource_flush, virtio_gpu_resource_unref, + virtio_gpu_set_scanout, virtio_gpu_transfer_host_3d, virtio_gpu_transfer_to_host_2d, + virtio_gpu_update_cursor, GpuCommand, GpuCommandDecodeError, GpuResponse::ErrUnspec, + GpuResponseEncodeError, VirtioGpuConfig, VirtioGpuResult, CONTROL_QUEUE, CURSOR_QUEUE, + NUM_QUEUES, POLL_EVENT, QUEUE_SIZE, VIRTIO_GPU_FLAG_FENCE, VIRTIO_GPU_FLAG_INFO_RING_IDX, + VIRTIO_GPU_MAX_SCANOUTS, + }, + virtio_gpu::{RutabagaVirtioGpu, VirtioGpu, VirtioGpuRing}, + GpuConfig, GpuMode, +}; +use log::{debug, error, trace, warn}; +use rutabaga_gfx::{ + ResourceCreate3D, RutabagaFence, Transfer3D, RUTABAGA_PIPE_BIND_RENDER_TARGET, + RUTABAGA_PIPE_TEXTURE_2D, +}; +use std::{ + cell::RefCell, + io::ErrorKind, + io::{self, Result as IoResult}, + os::fd::AsRawFd, + sync::{self, Arc, Mutex}, +}; +use thiserror::Error as ThisError; +use vhost::vhost_user::{ + gpu_message::{VhostUserGpuCursorPos, VhostUserGpuEdidRequest}, + message::{VhostUserProtocolFeatures, VhostUserVirtioFeatures}, + GpuBackend, +}; +use vhost_user_backend::{VhostUserBackend, VringEpollHandler, VringRwLock, VringT}; +use virtio_bindings::{ + bindings::{ + virtio_config::{VIRTIO_F_NOTIFY_ON_EMPTY, VIRTIO_F_RING_RESET, VIRTIO_F_VERSION_1}, + virtio_ring::{VIRTIO_RING_F_EVENT_IDX, VIRTIO_RING_F_INDIRECT_DESC}, + }, + virtio_gpu::{ + VIRTIO_GPU_F_CONTEXT_INIT, VIRTIO_GPU_F_EDID, VIRTIO_GPU_F_RESOURCE_BLOB, + VIRTIO_GPU_F_VIRGL, + }, +}; +use virtio_queue::{QueueOwnedT, Reader, Writer}; +use vm_memory::{ByteValued, GuestAddressSpace, GuestMemoryAtomic, 
GuestMemoryMmap, Le32}; +use vmm_sys_util::{ + epoll::EventSet, + eventfd::{EventFd, EFD_NONBLOCK}, +}; + +type Result = std::result::Result; + +#[derive(Debug, ThisError)] +pub enum Error { + #[error("Failed to handle event, didn't match EPOLLIN")] + HandleEventNotEpollIn, + #[error("Failed to handle unknown event")] + HandleEventUnknown, + #[error("Descriptor read failed")] + DescriptorReadFailed, + #[error("Descriptor write failed")] + DescriptorWriteFailed, + #[error("Invalid command type {0}")] + InvalidCommandType(u32), + #[error("Failed to send used queue notification: {0}")] + NotificationFailed(io::Error), + #[error("Failed to create new EventFd")] + EventFdFailed, + #[error("Failed to create an iterator over a descriptor chain: {0}")] + CreateIteratorDescChain(virtio_queue::Error), + #[error("Failed to create descriptor chain Reader: {0}")] + CreateReader(virtio_queue::Error), + #[error("Failed to create descriptor chain Writer: {0}")] + CreateWriter(virtio_queue::Error), + #[error("Failed to decode gpu command: {0}")] + GpuCommandDecode(GpuCommandDecodeError), + #[error("Failed to encode gpu response: {0}")] + GpuResponseEncode(GpuResponseEncodeError), + #[error("Failed add used chain to queue: {0}")] + QueueAddUsed(virtio_queue::Error), + #[error("Epoll handler not available: {0}")] + EpollHandler(String), + #[error("Failed register epoll listener: {0}")] + RegisterEpollListener(io::Error), +} + +impl From for io::Error { + fn from(e: Error) -> Self { + io::Error::new(io::ErrorKind::Other, e) + } +} + +struct VhostUserGpuBackendInner { + virtio_cfg: VirtioGpuConfig, + event_idx: bool, + gpu_backend: Option, + pub exit_event: EventFd, + mem: Option>, + renderer: GpuMode, +} + +pub struct VhostUserGpuBackend { + inner: Mutex, + // this uses sync::Weak to avoid a reference cycle + epoll_handler: Mutex>>>, + poll_event_fd: Mutex>, +} + +impl VhostUserGpuBackend { + pub fn new(gpu_config: GpuConfig) -> Result> { + log::trace!("VhostUserGpuBackend::new(config 
= {:?})", &gpu_config); + let inner = VhostUserGpuBackendInner { + virtio_cfg: VirtioGpuConfig { + events_read: 0.into(), + events_clear: 0.into(), + num_scanouts: Le32::from(VIRTIO_GPU_MAX_SCANOUTS as u32), + num_capsets: RutabagaVirtioGpu::MAX_NUMBER_OF_CAPSETS.into(), + }, + event_idx: false, + gpu_backend: None, + exit_event: EventFd::new(EFD_NONBLOCK).map_err(|_| Error::EventFdFailed)?, + mem: None, + renderer: gpu_config.get_renderer(), + }; + + Ok(Arc::new(Self { + inner: Mutex::new(inner), + epoll_handler: Mutex::new(sync::Weak::new()), + poll_event_fd: Mutex::new(None), + })) + } + + pub fn set_epoll_handler(&self, epoll_handlers: &[Arc>>]) { + // We only expect 1 thread to which we want to register all handlers + assert_eq!(epoll_handlers.len(), 1); + let mut handler = match self.epoll_handler.lock() { + Ok(h) => h, + Err(poisoned) => poisoned.into_inner(), + }; + *handler = Arc::downgrade(&epoll_handlers[0]); + } +} + +impl VhostUserGpuBackendInner { + fn process_gpu_command( + &mut self, + virtio_gpu: &mut impl VirtioGpu, + mem: &GuestMemoryMmap, + hdr: virtio_gpu_ctrl_hdr, + cmd: GpuCommand, + ) -> VirtioGpuResult { + virtio_gpu.force_ctx_0(); + debug!("process_gpu_command: {cmd:?}"); + match cmd { + GpuCommand::GetDisplayInfo => virtio_gpu.display_info(), + GpuCommand::GetEdid(virtio_gpu_get_edid { scanout, .. }) => { + let edid_req: VhostUserGpuEdidRequest = VhostUserGpuEdidRequest { + scanout_id: scanout, + }; + virtio_gpu.get_edid(edid_req) + } + GpuCommand::ResourceCreate2d(virtio_gpu_resource_create_2d { + resource_id, + format, + width, + height, + }) => { + let resource_create_3d = ResourceCreate3D { + target: RUTABAGA_PIPE_TEXTURE_2D, + format, + bind: RUTABAGA_PIPE_BIND_RENDER_TARGET, + width, + height, + depth: 1, + array_size: 1, + last_level: 0, + nr_samples: 0, + flags: 0, + }; + + virtio_gpu.resource_create_3d(resource_id, resource_create_3d) + } + GpuCommand::ResourceUnref(virtio_gpu_resource_unref { resource_id, .. 
}) => { + virtio_gpu.unref_resource(resource_id) + } + GpuCommand::SetScanout(virtio_gpu_set_scanout { + r, + scanout_id, + resource_id, + }) => virtio_gpu.set_scanout(scanout_id, resource_id, r.into()), + GpuCommand::ResourceFlush(virtio_gpu_resource_flush { resource_id, r, .. }) => { + virtio_gpu.flush_resource(resource_id, r.into()) + } + GpuCommand::TransferToHost2d(virtio_gpu_transfer_to_host_2d { + resource_id, + r: + virtio_gpu_rect { + x, + y, + width, + height, + }, + offset, + .. + }) => { + let transfer = Transfer3D::new_2d(x, y, width, height, offset); + virtio_gpu.transfer_write(0, resource_id, transfer) + } + GpuCommand::ResourceAttachBacking( + virtio_gpu_resource_attach_backing { resource_id, .. }, + iovecs, + ) => virtio_gpu.attach_backing(resource_id, mem, iovecs), + GpuCommand::ResourceDetachBacking(virtio_gpu_resource_detach_backing { + resource_id, + .. + }) => virtio_gpu.detach_backing(resource_id), + GpuCommand::UpdateCursor(virtio_gpu_update_cursor { + pos: + virtio_gpu_cursor_pos { + scanout_id, x, y, .. + }, + resource_id, + hot_x, + hot_y, + .. + }) => { + let cursor_pos = VhostUserGpuCursorPos { scanout_id, x, y }; + virtio_gpu.update_cursor(resource_id, cursor_pos, hot_x, hot_y) + } + GpuCommand::MoveCursor(virtio_gpu_update_cursor { + pos: + virtio_gpu_cursor_pos { + scanout_id, x, y, .. + }, + resource_id, + .. + }) => { + let cursor = VhostUserGpuCursorPos { scanout_id, x, y }; + virtio_gpu.move_cursor(resource_id, cursor) + } + GpuCommand::ResourceAssignUuid(_info) => { + panic!("virtio_gpu: GpuCommand::ResourceAssignUuid unimplemented"); + } + GpuCommand::GetCapsetInfo(virtio_gpu_get_capset_info { capset_index, .. }) => { + virtio_gpu.get_capset_info(capset_index) + } + GpuCommand::GetCapset(virtio_gpu_get_capset { + capset_id, + capset_version, + }) => virtio_gpu.get_capset(capset_id, capset_version), + + GpuCommand::CtxCreate(virtio_gpu_ctx_create { + context_init, + debug_name, + .. 
+ }) => { + let context_name: Option = String::from_utf8(debug_name.to_vec()).ok(); + virtio_gpu.create_context(hdr.ctx_id, context_init, context_name.as_deref()) + } + GpuCommand::CtxDestroy(_info) => virtio_gpu.destroy_context(hdr.ctx_id), + GpuCommand::CtxAttachResource(virtio_gpu_ctx_resource { resource_id, .. }) => { + virtio_gpu.context_attach_resource(hdr.ctx_id, resource_id) + } + GpuCommand::CtxDetachResource(virtio_gpu_ctx_resource { resource_id, .. }) => { + virtio_gpu.context_detach_resource(hdr.ctx_id, resource_id) + } + GpuCommand::ResourceCreate3d(virtio_gpu_resource_create_3d { + resource_id, + target, + format, + bind, + width, + height, + depth, + array_size, + last_level, + nr_samples, + flags, + .. + }) => { + let resource_create_3d = ResourceCreate3D { + target, + format, + bind, + width, + height, + depth, + array_size, + last_level, + nr_samples, + flags, + }; + + virtio_gpu.resource_create_3d(resource_id, resource_create_3d) + } + GpuCommand::TransferToHost3d(virtio_gpu_transfer_host_3d { + box_: virtio_gpu_box { x, y, z, w, h, d }, + offset, + resource_id, + level, + stride, + layer_stride, + }) => { + let ctx_id = hdr.ctx_id; + + let transfer = Transfer3D { + x, + y, + z, + w, + h, + d, + level, + stride, + layer_stride, + offset, + }; + + virtio_gpu.transfer_write(ctx_id, resource_id, transfer) + } + GpuCommand::TransferFromHost3d(virtio_gpu_transfer_host_3d { + box_: virtio_gpu_box { x, y, z, w, h, d }, + offset, + resource_id, + level, + stride, + layer_stride, + }) => { + let ctx_id = hdr.ctx_id; + + let transfer = Transfer3D { + x, + y, + z, + w, + h, + d, + level, + stride, + layer_stride, + offset, + }; + + virtio_gpu.transfer_read(ctx_id, resource_id, transfer, None) + } + GpuCommand::CmdSubmit3d { + fence_ids, + mut cmd_data, + } => virtio_gpu.submit_command(hdr.ctx_id, &mut cmd_data, &fence_ids), + GpuCommand::ResourceCreateBlob(_info) => { + panic!("virtio_gpu: GpuCommand::ResourceCreateBlob unimplemented"); + } + 
GpuCommand::SetScanoutBlob(_info) => { + panic!("virtio_gpu: GpuCommand::SetScanoutBlob unimplemented"); + } + GpuCommand::ResourceMapBlob(_info) => { + panic!("virtio_gpu: GpuCommand::ResourceMapBlob unimplemented"); + } + GpuCommand::ResourceUnmapBlob(_info) => { + panic!("virtio_gpu: GpuCommand::ResourceUnmapBlob unimplemented"); + } + } + } + + fn process_queue_chain( + &mut self, + virtio_gpu: &mut impl VirtioGpu, + vring: &VringRwLock, + head_index: u16, + reader: &mut Reader, + writer: &mut Writer, + signal_used_queue: &mut bool, + ) -> Result<()> { + let mut response = ErrUnspec; + let mem = self.mem.as_ref().unwrap().memory().into_inner(); + + let ctrl_hdr = match GpuCommand::decode(reader) { + Ok((ctrl_hdr, gpu_cmd)) => { + // TODO: consider having a method that return &'static str for logging purpose + let cmd_name = format!("{:?}", gpu_cmd); + let response_result = self.process_gpu_command(virtio_gpu, &mem, ctrl_hdr, gpu_cmd); + // Unwrap the response from inside Result and log information + response = match response_result { + Ok(response) => response, + Err(response) => { + debug!("GpuCommand {cmd_name} failed: {response:?}"); + response + } + }; + Some(ctrl_hdr) + } + Err(e) => { + warn!("Failed to decode GpuCommand: {e}"); + None + } + }; + + if writer.available_bytes() == 0 { + debug!("Command does not have descriptors for a response"); + vring.add_used(head_index, 0).map_err(Error::QueueAddUsed)?; + *signal_used_queue = true; + return Ok(()); + } + + let mut fence_id = 0; + let mut ctx_id = 0; + let mut flags = 0; + let mut ring_idx = 0; + + if let Some(ctrl_hdr) = ctrl_hdr { + if ctrl_hdr.flags & VIRTIO_GPU_FLAG_FENCE != 0 { + flags = ctrl_hdr.flags; + fence_id = ctrl_hdr.fence_id; + ctx_id = ctrl_hdr.ctx_id; + ring_idx = ctrl_hdr.ring_idx; + + let fence = RutabagaFence { + flags, + fence_id, + ctx_id, + ring_idx, + }; + if let Err(fence_response) = virtio_gpu.create_fence(fence) { + warn!("Failed to create fence: fence_id: {fence_id} 
fence_response: {fence_response}"); + response = fence_response; + } + } + } + + // Prepare the response now, even if it is going to wait until + // fence is complete. + let response_len = response + .encode(flags, fence_id, ctx_id, ring_idx, writer) + .map_err(Error::GpuResponseEncode)?; + + let mut add_to_queue = true; + if flags & VIRTIO_GPU_FLAG_FENCE != 0 { + let ring = match flags & VIRTIO_GPU_FLAG_INFO_RING_IDX { + 0 => VirtioGpuRing::Global, + _ => VirtioGpuRing::ContextSpecific { ctx_id, ring_idx }, + }; + debug!("Trying to process_fence for the command"); + add_to_queue = virtio_gpu.process_fence(ring, fence_id, head_index, response_len); + } + + if add_to_queue { + vring + .add_used(head_index, response_len) + .map_err(Error::QueueAddUsed)?; + trace!("add_used {}bytes", response_len); + *signal_used_queue = true; + } + Ok(()) + } + + /// Process the requests in the vring and dispatch replies + fn process_queue( + &mut self, + virtio_gpu: &mut impl VirtioGpu, + vring: &VringRwLock, + ) -> Result<()> { + let mem = self.mem.as_ref().unwrap().memory().into_inner(); + let desc_chains: Vec<_> = vring + .get_mut() + .get_queue_mut() + .iter(mem.clone()) + .map_err(Error::CreateIteratorDescChain)? 
+ .collect(); + + let mut signal_used_queue = false; + for desc_chain in desc_chains { + let head_index = desc_chain.head_index(); + let mut reader = desc_chain + .clone() + .reader(&mem) + .map_err(Error::CreateReader)?; + let mut writer = desc_chain.writer(&mem).map_err(Error::CreateWriter)?; + + self.process_queue_chain( + virtio_gpu, + vring, + head_index, + &mut reader, + &mut writer, + &mut signal_used_queue, + )?; + } + + if signal_used_queue { + debug!("Notifying used queue"); + vring + .signal_used_queue() + .map_err(Error::NotificationFailed)?; + } + debug!("Processing control queue finished"); + + Ok(()) + } + + fn handle_event( + &mut self, + device_event: u16, + virtio_gpu: &mut impl VirtioGpu, + vrings: &[VringRwLock], + ) -> IoResult<()> { + match device_event { + CONTROL_QUEUE | CURSOR_QUEUE => { + let vring = &vrings + .get(device_event as usize) + .ok_or_else(|| Error::HandleEventUnknown)?; + if self.event_idx { + // vm-virtio's Queue implementation only checks avail_index + // once, so to properly support EVENT_IDX we need to keep + // calling process_queue() until it stops finding new + // requests on the queue. + loop { + vring.disable_notification().unwrap(); + self.process_queue(virtio_gpu, vring)?; + if !vring.enable_notification().unwrap() { + break; + } + } + } else { + // Without EVENT_IDX, a single call is enough. + self.process_queue(virtio_gpu, vring)?; + } + } + POLL_EVENT => { + trace!("Handling POLL_EVENT"); + virtio_gpu.event_poll() + } + _ => { + warn!("unhandled device_event: {}", device_event); + return Err(Error::HandleEventUnknown.into()); + } + } + + Ok(()) + } + + fn lazy_init_and_handle_event( + &mut self, + device_event: u16, + evset: EventSet, + vrings: &[VringRwLock], + _thread_id: usize, + ) -> IoResult> { + // We use thread_local here because it is the easiest way to handle VirtioGpu being !Send + thread_local! 
{ + static VIRTIO_GPU_REF: RefCell> = const { RefCell::new(None) }; + } + + debug!("Handle event called"); + if evset != EventSet::IN { + return Err(Error::HandleEventNotEpollIn.into()); + }; + + let mut event_poll_fd = None; + VIRTIO_GPU_REF.with_borrow_mut(|maybe_virtio_gpu| { + let virtio_gpu = match maybe_virtio_gpu { + Some(virtio_gpu) => virtio_gpu, + None => { + let gpu_backend = self.gpu_backend.take().ok_or_else(|| { + io::Error::new( + ErrorKind::Other, + "set_gpu_socket() not called, GpuBackend missing", + ) + })?; + + // We currently pass the CONTROL_QUEUE vring to RutabagaVirtioGpu, because we only + // expect to process fences for that queue. + let control_vring = &vrings[CONTROL_QUEUE as usize]; + + // VirtioGpu::new can be called once per process (otherwise it panics), + // so if somehow another thread accidentally wants to create another gpu here, + // it will panic anyway + let virtio_gpu = + RutabagaVirtioGpu::new(control_vring, self.renderer, gpu_backend); + event_poll_fd = virtio_gpu.get_event_poll_fd(); + + maybe_virtio_gpu.insert(virtio_gpu) + } + }; + + self.handle_event(device_event, virtio_gpu, vrings) + })?; + + Ok(event_poll_fd) + } + + fn get_config(&self, offset: u32, size: u32) -> Vec { + let offset = offset as usize; + let size = size as usize; + + let buf = self.virtio_cfg.as_slice(); + + if offset + size > buf.len() { + return Vec::new(); + } + + buf[offset..offset + size].to_vec() + } +} + +/// VhostUserBackend trait methods +impl VhostUserBackend for VhostUserGpuBackend { + type Vring = VringRwLock; + type Bitmap = (); + + fn num_queues(&self) -> usize { + debug!("Num queues called"); + NUM_QUEUES + } + + fn max_queue_size(&self) -> usize { + debug!("Max queues called"); + QUEUE_SIZE + } + + fn features(&self) -> u64 { + 1 << VIRTIO_F_VERSION_1 + | 1 << VIRTIO_F_RING_RESET + | 1 << VIRTIO_F_NOTIFY_ON_EMPTY + | 1 << VIRTIO_RING_F_INDIRECT_DESC + | 1 << VIRTIO_RING_F_EVENT_IDX + | 1 << VIRTIO_GPU_F_VIRGL + | 1 << VIRTIO_GPU_F_EDID 
+ | 1 << VIRTIO_GPU_F_RESOURCE_BLOB + | 1 << VIRTIO_GPU_F_CONTEXT_INIT + | VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() + } + + fn protocol_features(&self) -> VhostUserProtocolFeatures { + debug!("Protocol features called"); + VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::MQ + } + + fn set_event_idx(&self, enabled: bool) { + self.inner.lock().unwrap().event_idx = enabled; + debug!("Event idx set to: {}", enabled); + } + + fn update_memory(&self, mem: GuestMemoryAtomic) -> IoResult<()> { + debug!("Update memory called"); + self.inner.lock().unwrap().mem = Some(mem); + Ok(()) + } + + fn set_gpu_socket(&self, backend: GpuBackend) { + self.inner.lock().unwrap().gpu_backend = Some(backend); + } + + fn get_config(&self, offset: u32, size: u32) -> Vec { + self.inner.lock().unwrap().get_config(offset, size) + } + + fn exit_event(&self, _thread_index: usize) -> Option { + self.inner.lock().unwrap().exit_event.try_clone().ok() + } + + fn handle_event( + &self, + device_event: u16, + evset: EventSet, + vrings: &[Self::Vring], + thread_id: usize, + ) -> IoResult<()> { + let poll_event_fd = self.inner.lock().unwrap().lazy_init_and_handle_event( + device_event, + evset, + vrings, + thread_id, + )?; + + if let Some(poll_event_fd) = poll_event_fd { + let epoll_handler = match self.epoll_handler.lock() { + Ok(h) => h, + Err(poisoned) => poisoned.into_inner(), + }; + let epoll_handler = match epoll_handler.upgrade() { + Some(handler) => handler, + None => { + return Err( + Error::EpollHandler("Failed to upgrade epoll handler".to_string()).into(), + ); + } + }; + epoll_handler + .register_listener(poll_event_fd.as_raw_fd(), EventSet::IN, POLL_EVENT as u64) + .map_err(Error::RegisterEpollListener)?; + debug!("Registered POLL_EVENT on fd: {}", poll_event_fd.as_raw_fd()); + // store the fd, so it is not closed after exiting this scope + self.poll_event_fd.lock().unwrap().replace(poll_event_fd); + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + 
use crate::protocol::{ + virtio_gpu_mem_entry, + GpuResponse::{OkCapsetInfo, OkDisplayInfo, OkEdid, OkNoData}, + VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING, VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, + VIRTIO_GPU_CMD_RESOURCE_FLUSH, VIRTIO_GPU_CMD_SET_SCANOUT, + VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D, VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D, + VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, + VIRTIO_GPU_RESP_ERR_UNSPEC, VIRTIO_GPU_RESP_OK_NODATA, + }; + use crate::virtio_gpu::MockVirtioGpu; + use assert_matches::assert_matches; + use mockall::predicate; + use rusty_fork::rusty_fork_test; + use std::{ + fs::File, + io::{ErrorKind, Read}, + iter::zip, + mem, + os::{fd::FromRawFd, unix::net::UnixStream}, + sync::Arc, + thread, + time::Duration, + }; + use vhost::vhost_user::gpu_message::{VhostUserGpuScanout, VhostUserGpuUpdate}; + use vhost_user_backend::{VhostUserDaemon, VringRwLock, VringT}; + use virtio_bindings::virtio_ring::{VRING_DESC_F_NEXT, VRING_DESC_F_WRITE}; + use virtio_queue::{mock::MockSplitQueue, Descriptor, Queue, QueueT}; + use vm_memory::{ + ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap, + }; + + const SOCKET_PATH: &str = "vgpu.socket"; + const MEM_SIZE: usize = 2 * 1024 * 1024; // 2MiB + + const CURSOR_QUEUE_ADDR: GuestAddress = GuestAddress(0x0); + const CURSOR_QUEUE_DATA_ADDR: GuestAddress = GuestAddress(0x1_000); + const CURSOR_QUEUE_SIZE: u16 = 16; + const CONTROL_QUEUE_ADDR: GuestAddress = GuestAddress(0x2_000); + const CONTROL_QUEUE_DATA_ADDR: GuestAddress = GuestAddress(0x10_000); + const CONTROL_QUEUE_SIZE: u16 = 1024; + + fn init() -> (Arc, GuestMemoryAtomic) { + let backend = VhostUserGpuBackend::new(GpuConfig::new( + SOCKET_PATH.into(), + GpuMode::ModeVirglRenderer, + )) + .unwrap(); + let mem = GuestMemoryAtomic::new( + GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), MEM_SIZE)]).unwrap(), + ); + + backend.update_memory(mem.clone()).unwrap(); + (backend, mem) + } + + /// Arguments to create a 
descriptor chain for testing + struct TestingDescChainArgs<'a> { + readable_desc_bufs: &'a [&'a [u8]], + writable_desc_lengths: &'a [u32], + } + + fn gpu_backend_pair() -> (UnixStream, GpuBackend) { + let (frontend, backend) = UnixStream::pair().unwrap(); + let backend = GpuBackend::from_stream(backend); + + (frontend, backend) + } + + fn event_fd_into_file(event_fd: EventFd) -> File { + // SAFETY: We ensure that the `event_fd` is properly handled such that its file descriptor + // is not closed after `File` takes ownership of it. + unsafe { + let event_fd_raw = event_fd.as_raw_fd(); + mem::forget(event_fd); + File::from_raw_fd(event_fd_raw) + } + } + + #[test] + fn test_process_gpu_command() { + let (backend, mem) = init(); + let mut backend_inner = backend.inner.lock().unwrap(); + let hdr = virtio_gpu_ctrl_hdr::default(); + + let mut test_cmd = |cmd: GpuCommand, setup: fn(&mut MockVirtioGpu)| { + let mut mock_gpu = MockVirtioGpu::new(); + mock_gpu.expect_force_ctx_0().return_once(|| ()); + setup(&mut mock_gpu); + backend_inner.process_gpu_command(&mut mock_gpu, &mem.memory(), hdr, cmd) + }; + + let cmd = GpuCommand::GetDisplayInfo; + let result = test_cmd(cmd, |g| { + g.expect_display_info() + .return_once(|| Ok(OkDisplayInfo(vec![(1280, 720, true)]))); + }); + assert_matches!(result, Ok(OkDisplayInfo(_))); + + let cmd = GpuCommand::GetEdid(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_get_edid().return_once(|_| { + Ok(OkEdid { + blob: Box::new([0xff; 512]), + }) + }); + }); + assert_matches!(result, Ok(OkEdid { .. 
})); + + let cmd = GpuCommand::ResourceCreate2d(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_resource_create_3d() + .return_once(|_, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::ResourceUnref(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_unref_resource().return_once(|_| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::SetScanout(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_set_scanout().return_once(|_, _, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::ResourceFlush(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_flush_resource().return_once(|_, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::TransferToHost2d(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_transfer_write() + .return_once(|_, _, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::ResourceAttachBacking(Default::default(), Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_attach_backing() + .return_once(|_, _, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::ResourceDetachBacking(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_detach_backing().return_once(|_| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::GetCapsetInfo(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_get_capset_info().return_once(|_| { + Ok(OkCapsetInfo { + capset_id: 1, + version: 2, + size: 32, + }) + }); + }); + assert_matches!( + result, + Ok(OkCapsetInfo { + capset_id: 1, + version: 2, + size: 32 + }) + ); + + let cmd = GpuCommand::CtxCreate(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_create_context() + .return_once(|_, _, _| Ok(OkNoData)); + }); + 
assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::CtxDestroy(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_destroy_context().return_once(|_| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::CtxAttachResource(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_context_attach_resource() + .return_once(|_, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::CtxDetachResource(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_context_detach_resource() + .return_once(|_, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::ResourceCreate3d(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_resource_create_3d() + .return_once(|_, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::TransferToHost3d(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_transfer_write() + .return_once(|_, _, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::TransferFromHost3d(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_transfer_read() + .return_once(|_, _, _, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::CmdSubmit3d { + cmd_data: vec![0xff; 512], + fence_ids: vec![], + }; + let result = test_cmd(cmd, |g| { + g.expect_submit_command() + .return_once(|_, _, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::UpdateCursor(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_update_cursor() + .return_once(|_, _, _, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::MoveCursor(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_move_cursor().return_once(|_, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = 
GpuCommand::MoveCursor(Default::default()); + let result = test_cmd(cmd, |g| { + g.expect_move_cursor().return_once(|_, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + } + + fn make_descriptors_into_a_chain(start_idx: u16, descriptors: &mut [Descriptor]) { + let last_idx = start_idx + descriptors.len() as u16 - 1; + for (idx, desc) in zip(start_idx.., descriptors.iter_mut()) { + if idx == last_idx { + desc.set_flags(desc.flags() & !VRING_DESC_F_NEXT as u16); + } else { + desc.set_flags(desc.flags() | VRING_DESC_F_NEXT as u16); + desc.set_next(idx + 1); + }; + } + } + + // Creates a vring from the specified descriptor chains + // For each created device-writable descriptor chain a Vec<(GuestAddress, usize)> is returned + // representing the descriptors of that chain. + fn create_vring( + mem: &GuestMemoryAtomic, + chains: &[TestingDescChainArgs], + queue_addr_start: GuestAddress, + data_addr_start: GuestAddress, + queue_size: u16, + ) -> (VringRwLock, Vec>, EventFd) { + let mem_handle = mem.memory(); + mem.memory() + .check_address(queue_addr_start) + .expect("Invalid start adress"); + + let mut output_bufs = Vec::new(); + let vq = MockSplitQueue::create(&*mem_handle, queue_addr_start, queue_size); + // Address of the buffer associated with the descriptor + let mut next_addr = data_addr_start.0; + let mut chain_index_start = 0; + let mut descriptors = Vec::new(); + + for chain in chains { + for buf in chain.readable_desc_bufs { + mem.memory() + .check_address(GuestAddress(next_addr)) + .expect("Readable descriptor's buffer address is not valid!"); + let desc = Descriptor::new( + next_addr, + buf.len() + .try_into() + .expect("Buffer too large to fit into descriptor"), + 0, + 0, + ); + mem_handle.write(buf, desc.addr()).unwrap(); + descriptors.push(desc); + next_addr += buf.len() as u64; + } + let mut writable_descriptor_adresses = Vec::new(); + for desc_len in chain.writable_desc_lengths.iter().copied() { + mem.memory() + 
.check_address(GuestAddress(next_addr)) + .expect("Writable descriptor's buffer address is not valid!"); + let desc = Descriptor::new(next_addr, desc_len, VRING_DESC_F_WRITE as u16, 0); + writable_descriptor_adresses.push(desc.addr()); + descriptors.push(desc); + next_addr += desc_len as u64; + } + output_bufs.push(writable_descriptor_adresses); + make_descriptors_into_a_chain( + chain_index_start as u16, + &mut descriptors[chain_index_start..], + ); + chain_index_start = descriptors.len(); + } + + assert!(descriptors.len() < queue_size as usize); + if !descriptors.is_empty() { + vq.build_multiple_desc_chains(&descriptors) + .expect("Failed to build descriptor chain"); + } + + let queue: Queue = vq.create_queue().unwrap(); + let vring = VringRwLock::new(mem.clone(), queue_size).unwrap(); + let signal_used_queue_evt = EventFd::new(EFD_NONBLOCK).unwrap(); + let signal_used_queue_evt_clone = signal_used_queue_evt.try_clone().unwrap(); + vring + .set_queue_info(queue.desc_table(), queue.avail_ring(), queue.used_ring()) + .unwrap(); + vring.set_call(Some(event_fd_into_file(signal_used_queue_evt_clone))); + + vring.set_enabled(true); + vring.set_queue_ready(true); + + (vring, output_bufs, signal_used_queue_evt) + } + + fn create_control_vring( + mem: &GuestMemoryAtomic, + chains: &[TestingDescChainArgs], + ) -> (VringRwLock, Vec>, EventFd) { + create_vring( + mem, + chains, + CONTROL_QUEUE_ADDR, + CONTROL_QUEUE_DATA_ADDR, + CONTROL_QUEUE_SIZE, + ) + } + + fn create_cursor_vring( + mem: &GuestMemoryAtomic, + chains: &[TestingDescChainArgs], + ) -> (VringRwLock, Vec>, EventFd) { + create_vring( + mem, + chains, + CURSOR_QUEUE_ADDR, + CURSOR_QUEUE_DATA_ADDR, + CURSOR_QUEUE_SIZE, + ) + } + + #[test] + fn test_handle_event_executes_gpu_commands() { + let (backend, mem) = init(); + backend.update_memory(mem.clone()).unwrap(); + let mut backend_inner = backend.inner.lock().unwrap(); + + let hdr = virtio_gpu_ctrl_hdr { + type_: VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, + 
..Default::default() + }; + + let cmd = virtio_gpu_resource_create_2d { + resource_id: 1, + format: VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, + width: 1920, + height: 1080, + }; + + let chain1 = TestingDescChainArgs { + readable_desc_bufs: &[hdr.as_slice(), cmd.as_slice()], + writable_desc_lengths: &[mem::size_of::() as u32], + }; + + let chain2 = TestingDescChainArgs { + readable_desc_bufs: &[hdr.as_slice(), cmd.as_slice()], + writable_desc_lengths: &[mem::size_of::() as u32], + }; + + let (control_vring, outputs, control_signal_used_queue_evt) = + create_control_vring(&mem, &[chain1, chain2]); + let (cursor_vring, _, cursor_signal_used_queue_evt) = create_cursor_vring(&mem, &[]); + + let mem = mem.memory().into_inner(); + + let mut mock_gpu = MockVirtioGpu::new(); + let seq = &mut mockall::Sequence::new(); + + mock_gpu + .expect_force_ctx_0() + .return_const(()) + .once() + .in_sequence(seq); + + mock_gpu + .expect_resource_create_3d() + .with(predicate::eq(1), predicate::always()) + .returning(|_, _| Ok(OkNoData)) + .once() + .in_sequence(seq); + + mock_gpu + .expect_force_ctx_0() + .return_const(()) + .once() + .in_sequence(seq); + + mock_gpu + .expect_resource_create_3d() + .with(predicate::eq(1), predicate::always()) + .returning(|_, _| Err(ErrUnspec)) + .once() + .in_sequence(seq); + + assert_eq!( + cursor_signal_used_queue_evt.read().unwrap_err().kind(), + ErrorKind::WouldBlock + ); + + backend_inner + .handle_event(0, &mut mock_gpu, &[control_vring.clone(), cursor_vring]) + .unwrap(); + + let expected_hdr1 = virtio_gpu_ctrl_hdr { + type_: VIRTIO_GPU_RESP_OK_NODATA, + ..Default::default() + }; + + let expected_hdr2 = virtio_gpu_ctrl_hdr { + type_: VIRTIO_GPU_RESP_ERR_UNSPEC, + ..Default::default() + }; + control_signal_used_queue_evt + .read() + .expect("Expected device to signal used queue!"); + assert_eq!( + cursor_signal_used_queue_evt.read().unwrap_err().kind(), + ErrorKind::WouldBlock, + "Unexpected signal_used_queue on cursor queue!" 
+ ); + + let result_hdr1: virtio_gpu_ctrl_hdr = mem.memory().read_obj(outputs[0][0]).unwrap(); + assert_eq!(result_hdr1, expected_hdr1); + + let result_hdr2: virtio_gpu_ctrl_hdr = mem.memory().read_obj(outputs[1][0]).unwrap(); + assert_eq!(result_hdr2, expected_hdr2); + } + + #[test] + fn test_command_with_fence_ready_immediately() { + let (backend, mem) = init(); + backend.update_memory(mem.clone()).unwrap(); + let mut backend_inner = backend.inner.lock().unwrap(); + + const FENCE_ID: u64 = 123; + + let hdr = virtio_gpu_ctrl_hdr { + type_: VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, + flags: VIRTIO_GPU_FLAG_FENCE, + fence_id: FENCE_ID, + ctx_id: 0, + ring_idx: 0, + padding: Default::default(), + }; + + let cmd = virtio_gpu_transfer_host_3d::default(); + + let chain = TestingDescChainArgs { + readable_desc_bufs: &[hdr.as_slice(), cmd.as_slice()], + writable_desc_lengths: &[mem::size_of::() as u32], + }; + + let (control_vring, outputs, control_signal_used_queue_evt) = + create_control_vring(&mem, &[chain]); + let (cursor_vring, _, _) = create_cursor_vring(&mem, &[]); + + let mut mock_gpu = MockVirtioGpu::new(); + let seq = &mut mockall::Sequence::new(); + + mock_gpu + .expect_force_ctx_0() + .return_const(()) + .once() + .in_sequence(seq); + + mock_gpu + .expect_transfer_write() + .returning(|_, _, _| Ok(OkNoData)) + .once() + .in_sequence(seq); + + mock_gpu + .expect_create_fence() + .withf(|fence| fence.fence_id == FENCE_ID) + .returning(|_| Ok(OkNoData)) + .once() + .in_sequence(seq); + + mock_gpu + .expect_process_fence() + .with( + predicate::eq(VirtioGpuRing::Global), + predicate::eq(FENCE_ID), + predicate::eq(0), + predicate::eq(mem::size_of_val(&hdr) as u32), + ) + .return_const(true) + .once() + .in_sequence(seq); + + backend_inner + .handle_event(0, &mut mock_gpu, &[control_vring.clone(), cursor_vring]) + .unwrap(); + + let expected_hdr = virtio_gpu_ctrl_hdr { + type_: VIRTIO_GPU_RESP_OK_NODATA, + flags: VIRTIO_GPU_FLAG_FENCE, + fence_id: FENCE_ID, + ctx_id: 0, + 
ring_idx: 0, + padding: Default::default(), + }; + + control_signal_used_queue_evt + .read() + .expect("Expected device to call signal_used_queue!"); + + let result_hdr1: virtio_gpu_ctrl_hdr = mem.memory().read_obj(outputs[0][0]).unwrap(); + assert_eq!(result_hdr1, expected_hdr); + } + + #[test] + fn test_command_with_fence_not_ready() { + let (backend, mem) = init(); + backend.update_memory(mem.clone()).unwrap(); + let mut backend_inner = backend.inner.lock().unwrap(); + + const FENCE_ID: u64 = 123; + const CTX_ID: u32 = 1; + const RING_IDX: u8 = 2; + + let hdr = virtio_gpu_ctrl_hdr { + type_: VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D, + flags: VIRTIO_GPU_FLAG_FENCE | VIRTIO_GPU_FLAG_INFO_RING_IDX, + fence_id: FENCE_ID, + ctx_id: CTX_ID, + ring_idx: RING_IDX, + padding: Default::default(), + }; + + let cmd = virtio_gpu_transfer_host_3d::default(); + + let chain = TestingDescChainArgs { + readable_desc_bufs: &[hdr.as_slice(), cmd.as_slice()], + writable_desc_lengths: &[mem::size_of::() as u32], + }; + + let (control_vring, _, control_signal_used_queue_evt) = + create_control_vring(&mem, &[chain]); + let (cursor_vring, _, _) = create_cursor_vring(&mem, &[]); + + let mut mock_gpu = MockVirtioGpu::new(); + let seq = &mut mockall::Sequence::new(); + + mock_gpu + .expect_force_ctx_0() + .return_const(()) + .once() + .in_sequence(seq); + + mock_gpu + .expect_transfer_read() + .returning(|_, _, _, _| Ok(OkNoData)) + .once() + .in_sequence(seq); + + mock_gpu + .expect_create_fence() + .withf(|fence| fence.fence_id == FENCE_ID) + .returning(|_| Ok(OkNoData)) + .once() + .in_sequence(seq); + + mock_gpu + .expect_process_fence() + .with( + predicate::eq(VirtioGpuRing::ContextSpecific { + ctx_id: CTX_ID, + ring_idx: RING_IDX, + }), + predicate::eq(FENCE_ID), + predicate::eq(0), + predicate::eq(mem::size_of_val(&hdr) as u32), + ) + .return_const(false) + .once() + .in_sequence(seq); + + backend_inner + .handle_event(0, &mut mock_gpu, &[control_vring.clone(), cursor_vring]) + 
.unwrap(); + + assert_eq!( + control_signal_used_queue_evt.read().unwrap_err().kind(), + ErrorKind::WouldBlock + ); + } + + rusty_fork_test! { + #[test] + fn test_verify_backend() { + let gpu_config = GpuConfig::new(SOCKET_PATH.into(), GpuMode::ModeVirglRenderer); + let backend = VhostUserGpuBackend::new(gpu_config).unwrap(); + + assert_eq!(backend.num_queues(), NUM_QUEUES); + assert_eq!(backend.max_queue_size(), QUEUE_SIZE); + assert_eq!(backend.features(), 0x1017100001B); + assert_eq!( + backend.protocol_features(), + VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::MQ + ); + assert_eq!(backend.queues_per_thread(), vec![0xffff_ffff]); + assert_eq!(backend.get_config(0, 0), vec![]); + backend.set_gpu_socket(gpu_backend_pair().1); + + backend.set_event_idx(true); + assert!(backend.inner.lock().unwrap().event_idx); + + assert!(backend.exit_event(0).is_some()); + + let mem = GuestMemoryAtomic::new( + GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap(), + ); + backend.update_memory(mem.clone()).unwrap(); + + let vring = VringRwLock::new(mem, 0x1000).unwrap(); + vring.set_queue_info(0x100, 0x200, 0x300).unwrap(); + vring.set_queue_ready(true); + + assert_eq!( + backend + .handle_event(0, EventSet::OUT, &[vring.clone()], 0) + .unwrap_err() + .kind(), + io::ErrorKind::Other + ); + + assert_eq!( + backend + .handle_event(1, EventSet::IN, &[vring.clone()], 0) + .unwrap_err() + .kind(), + io::ErrorKind::Other + ); + + // Hit the loop part + backend.set_event_idx(true); + backend + .handle_event(0, EventSet::IN, &[vring.clone()], 0) + .unwrap(); + + // Hit the non-loop part + backend.set_event_idx(false); + backend.handle_event(0, EventSet::IN, &[vring], 0).unwrap(); + } + } + + mod test_image { + use super::*; + const GREEN_PIXEL: u32 = 0x00FF00FF; + const RED_PIXEL: u32 = 0xFF0000FF; + const BYTES_PER_PIXEL: usize = 4; + + pub fn write(mem: &GuestMemoryMmap, image_addr: GuestAddress, width: u32, height: u32) { + let mut image_addr: u64 = 
image_addr.0; + for i in 0..width * height { + let pixel = if i % 2 == 0 { RED_PIXEL } else { GREEN_PIXEL }; + let pixel = pixel.to_be_bytes(); + + mem.memory() + .write_slice(&pixel, GuestAddress(image_addr)) + .unwrap(); + image_addr += BYTES_PER_PIXEL as u64; + } + } + + pub fn assert(data: &[u8], width: u32, height: u32) { + assert_eq!(data.len(), (width * height) as usize * BYTES_PER_PIXEL); + for (i, pixel) in data.chunks(BYTES_PER_PIXEL).enumerate() { + let expected_pixel = if i % 2 == 0 { RED_PIXEL } else { GREEN_PIXEL }; + assert_eq!( + pixel, + expected_pixel.to_be_bytes(), + "Wrong pixel at index {i}" + ); + } + } + } + + fn split_into_mem_entries( + addr: GuestAddress, + len: u32, + chunk_size: u32, + ) -> Vec { + let mut entries = Vec::new(); + let mut addr = addr.0; + let mut remaining = len; + + while remaining >= chunk_size { + entries.push(virtio_gpu_mem_entry { + addr, + length: chunk_size, + padding: Default::default(), + }); + addr += chunk_size as u64; + remaining -= chunk_size; + } + + if remaining != 0 { + entries.push(virtio_gpu_mem_entry { + addr, + length: remaining, + padding: Default::default(), + }) + } + + entries + } + + fn new_hdr(type_: u32) -> virtio_gpu_ctrl_hdr { + virtio_gpu_ctrl_hdr { + type_, + ..Default::default() + } + } + + rusty_fork_test! { + /// This test uses multiple gpu commands, it crates a resource, writes a test image into it and + /// then present the display output. 
+ #[test] + fn test_display_output() { + let (backend, mem) = init(); + let (mut gpu_frontend, gpu_backend) = gpu_backend_pair(); + gpu_frontend + .set_read_timeout(Some(Duration::from_secs(10))) + .unwrap(); + gpu_frontend + .set_write_timeout(Some(Duration::from_secs(10))) + .unwrap(); + + backend.set_gpu_socket(gpu_backend); + + // Unfortunately there is no way to crate a VringEpollHandler directly (the ::new is not public) + // So we create a daemon to create the epoll handler for us here + let daemon = VhostUserDaemon::new( + "vhost-device-gpu-backend".to_string(), + backend.clone(), + mem.clone(), + ) + .expect("Could not create daemon"); + let epoll_handlers = daemon.get_epoll_handlers(); + backend.set_epoll_handler(&epoll_handlers); + mem::drop(daemon); + + const IMAGE_ADDR: GuestAddress = GuestAddress(0x30_000); + const IMAGE_WIDTH: u32 = 640; + const IMAGE_HEIGHT: u32 = 480; + const RESP_SIZE: u32 = mem::size_of::() as u32; + + let image_rect = virtio_gpu_rect { + x: 0, + y: 0, + width: IMAGE_WIDTH, + height: IMAGE_HEIGHT, + }; + + // Construct a command to create a resource + let hdr = new_hdr(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D); + let cmd = virtio_gpu_resource_create_2d { + resource_id: 1, + format: VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, // RGBA8888 + width: IMAGE_WIDTH, + height: IMAGE_HEIGHT, + }; + let create_resource_cmd = TestingDescChainArgs { + readable_desc_bufs: &[hdr.as_slice(), cmd.as_slice()], + writable_desc_lengths: &[RESP_SIZE], + }; + + // Construct a command to attach backing memory location(s) to the resource + let hdr = new_hdr(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING); + let mem_entries = split_into_mem_entries(IMAGE_ADDR, IMAGE_WIDTH * IMAGE_HEIGHT * 4, 4096); + let cmd = virtio_gpu_resource_attach_backing { + resource_id: 1, + nr_entries: mem_entries.len() as u32, + }; + let mut readable_desc_bufs = vec![hdr.as_slice(), cmd.as_slice()]; + readable_desc_bufs.extend(mem_entries.iter().map(|entry| entry.as_slice())); + let attach_backing_cmd 
= TestingDescChainArgs { + readable_desc_bufs: &readable_desc_bufs, + writable_desc_lengths: &[RESP_SIZE], + }; + + // Construct a command to transfer the resource data from the attached memory to gpu + let hdr = new_hdr(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D); + let cmd = virtio_gpu_transfer_to_host_2d { + r: image_rect, + offset: 0, + resource_id: 1, + padding: Default::default(), + }; + let transfer_to_host_cmd = TestingDescChainArgs { + readable_desc_bufs: &[hdr.as_slice(), cmd.as_slice()], + writable_desc_lengths: &[RESP_SIZE], + }; + + // Construct a command to set the scanout (display) output + let hdr = new_hdr(VIRTIO_GPU_CMD_SET_SCANOUT); + let cmd = virtio_gpu_set_scanout { + r: image_rect, + resource_id: 1, + scanout_id: 1, + }; + let set_scanout_cmd = TestingDescChainArgs { + readable_desc_bufs: &[hdr.as_slice(), cmd.as_slice()], + writable_desc_lengths: &[RESP_SIZE], + }; + + // Construct a command to flush the resource + let hdr = new_hdr(VIRTIO_GPU_CMD_RESOURCE_FLUSH); + let cmd = virtio_gpu_resource_flush { + r: image_rect, + resource_id: 1, + padding: Default::default(), + }; + let flush_resource_cmd = TestingDescChainArgs { + readable_desc_bufs: &[hdr.as_slice(), cmd.as_slice()], + writable_desc_lengths: &[RESP_SIZE], + }; + + // Create a control queue with all the commands defined above + let commands = [ + create_resource_cmd, + attach_backing_cmd, + transfer_to_host_cmd, + set_scanout_cmd, + flush_resource_cmd, + ]; + let (control_vring, _, _) = create_control_vring(&mem, &commands); + + // Create an empty cursor queue with no commands + let (cursor_vring, _, _) = create_cursor_vring(&mem, &[]); + + // Write the test image in guest memory + test_image::write(&mem.memory(), IMAGE_ADDR, IMAGE_WIDTH, IMAGE_HEIGHT); + + const EXPECTED_SCANOUT_REQUEST: VhostUserGpuScanout = VhostUserGpuScanout { + scanout_id: 1, + width: IMAGE_WIDTH, + height: IMAGE_HEIGHT, + }; + + const EXPECTED_UPDATE_REQUEST: VhostUserGpuUpdate = VhostUserGpuUpdate { + scanout_id: 
1, + x: 0, + y: 0, + width: IMAGE_WIDTH, + height: IMAGE_HEIGHT, + }; + + // This simulates the frontend vmm. Here we check the issued frontend requests and if the + // output matches the test image. + let frontend_thread = thread::spawn(move || { + let mut scanout_request_hdr = [0; 12]; + let mut scanout_request = VhostUserGpuScanout::default(); + let mut update_request_hdr = [0; 12]; + let mut update_request = VhostUserGpuUpdate::default(); + let mut result_img = vec![0xdd; (IMAGE_WIDTH * IMAGE_HEIGHT * 4) as usize]; + + gpu_frontend.read_exact(&mut scanout_request_hdr).unwrap(); + gpu_frontend + .read_exact(scanout_request.as_mut_slice()) + .unwrap(); + gpu_frontend.read_exact(&mut update_request_hdr).unwrap(); + gpu_frontend + .read_exact(update_request.as_mut_slice()) + .unwrap(); + gpu_frontend.read_exact(&mut result_img).unwrap(); + + assert_eq!(scanout_request, EXPECTED_SCANOUT_REQUEST); + assert_eq!(update_request, EXPECTED_UPDATE_REQUEST); + test_image::assert(&result_img, IMAGE_WIDTH, IMAGE_HEIGHT); + }); + + backend + .handle_event(0, EventSet::IN, &[control_vring, cursor_vring], 0) + .unwrap(); + + frontend_thread.join().unwrap(); + } + } +} diff --git a/staging/vhost-device-gpu/src/lib.rs b/staging/vhost-device-gpu/src/lib.rs new file mode 100644 index 00000000..8c840d93 --- /dev/null +++ b/staging/vhost-device-gpu/src/lib.rs @@ -0,0 +1,58 @@ +// Copyright 2024 Red Hat Inc +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +pub mod device; +pub mod protocol; +pub mod virtio_gpu; + +use std::path::PathBuf; + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum GpuMode { + ModeVirglRenderer, + ModeGfxstream, +} + +#[derive(Debug, Clone)] +/// This structure is the public API through which an external program +/// is allowed to configure the backend. 
+pub struct GpuConfig { + /// vhost-user Unix domain socket + socket_path: PathBuf, + renderer: GpuMode, +} + +impl GpuConfig { + /// Create a new instance of the GpuConfig struct, containing the + /// parameters to be fed into the gpu-backend server. + pub const fn new(socket_path: PathBuf, renderer: GpuMode) -> Self { + Self { + socket_path, + renderer, + } + } + + /// Return the path of the unix domain socket which is listening to + /// requests from the guest. + pub fn get_socket_path(&self) -> PathBuf { + PathBuf::from(&self.socket_path.as_path()) + } + + pub fn get_renderer(&self) -> GpuMode { + self.renderer + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_gpu_config() { + // Test the creation of GpuConfig struct + let socket_path = PathBuf::from("/tmp/socket"); + let gpu_config = GpuConfig::new(socket_path.clone(), GpuMode::ModeVirglRenderer); + assert_eq!(gpu_config.get_socket_path(), socket_path); + } +} diff --git a/staging/vhost-device-gpu/src/main.rs b/staging/vhost-device-gpu/src/main.rs new file mode 100644 index 00000000..35a570a3 --- /dev/null +++ b/staging/vhost-device-gpu/src/main.rs @@ -0,0 +1,132 @@ +// VIRTIO GPU Emulation via vhost-user +// +// Copyright 2024 Red Hat Inc +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +use log::{error, info}; +use std::{path::PathBuf, process::exit}; + +use clap::{Parser, ValueEnum}; +use thiserror::Error as ThisError; +use vhost_device_gpu::{ + device::{self, VhostUserGpuBackend}, + GpuConfig, GpuMode, +}; +use vhost_user_backend::VhostUserDaemon; +use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap}; + +type Result = std::result::Result; + +#[derive(Debug, ThisError)] +pub(crate) enum Error { + #[error("Could not create backend: {0}")] + CouldNotCreateBackend(device::Error), + #[error("Could not create daemon: {0}")] + CouldNotCreateDaemon(vhost_user_backend::Error), + #[error("Fatal error: {0}")] + ServeFailed(vhost_user_backend::Error), +} + +#[derive(Parser, Debug)] 
+#[clap(author, version, about, long_about = None)] +struct GpuArgs { + /// vhost-user Unix domain socket. + #[clap(short, long, value_name = "SOCKET")] + socket_path: PathBuf, + #[clap(short, long, value_enum)] + renderer: RenderMode, +} + +#[derive(Debug, Clone, ValueEnum)] +enum RenderMode { + Gfxstream, + Virglrenderer, +} + +impl From for GpuMode { + fn from(mode: RenderMode) -> Self { + match mode { + RenderMode::Gfxstream => GpuMode::ModeGfxstream, + RenderMode::Virglrenderer => GpuMode::ModeVirglRenderer, + } + } +} + +impl TryFrom for GpuConfig { + type Error = Error; + + fn try_from(args: GpuArgs) -> Result { + let socket_path = args.socket_path; + let renderer: GpuMode = args.renderer.into(); + + Ok(GpuConfig::new(socket_path, renderer)) + } +} + +fn start_backend(config: GpuConfig) -> Result<()> { + info!("Starting backend"); + let socket = config.get_socket_path(); + let backend = VhostUserGpuBackend::new(config).map_err(Error::CouldNotCreateBackend)?; + + let mut daemon = VhostUserDaemon::new( + "vhost-device-gpu-backend".to_string(), + backend.clone(), + GuestMemoryAtomic::new(GuestMemoryMmap::new()), + ) + .map_err(Error::CouldNotCreateDaemon)?; + + backend.set_epoll_handler(&daemon.get_epoll_handlers()); + + daemon.serve(socket).map_err(Error::ServeFailed)?; + Ok(()) +} + +fn main() { + env_logger::init(); + + if let Err(e) = start_backend(GpuConfig::try_from(GpuArgs::parse()).unwrap()) { + error!("{e}"); + exit(1); + } +} + +#[cfg(test)] +mod tests { + use assert_matches::assert_matches; + use std::path::Path; + + use super::*; + + impl GpuArgs { + pub(crate) fn from_args(path: &Path) -> GpuArgs { + GpuArgs { + socket_path: path.to_path_buf(), + renderer: RenderMode::Gfxstream, + } + } + } + + #[test] + fn test_parse_successful() { + let socket_name = Path::new("vgpu.sock"); + + let cmd_args = GpuArgs::from_args(socket_name); + let config = GpuConfig::try_from(cmd_args); + + assert!(config.is_ok()); + + let config = config.unwrap(); + 
assert_eq!(config.get_socket_path(), socket_name); + } + + #[test] + fn test_fail_listener() { + // This will fail the listeners and thread will panic. + let socket_name = Path::new("~/path/not/present/gpu"); + let cmd_args = GpuArgs::from_args(socket_name); + let config = GpuConfig::try_from(cmd_args).unwrap(); + + assert_matches!(start_backend(config).unwrap_err(), Error::ServeFailed(_)); + } +} diff --git a/staging/vhost-device-gpu/src/protocol.rs b/staging/vhost-device-gpu/src/protocol.rs new file mode 100644 index 00000000..ea38e839 --- /dev/null +++ b/staging/vhost-device-gpu/src/protocol.rs @@ -0,0 +1,1390 @@ +// Copyright 2024 Red Hat Inc +// Copyright 2019 The ChromiumOS Authors +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +#![allow(non_camel_case_types)] + +use log::trace; +use std::{ + cmp::min, + convert::From, + ffi::CStr, + fmt::{self, Display}, + io::{self, Read, Write}, + marker::PhantomData, + mem::{size_of, size_of_val}, +}; + +use rutabaga_gfx::RutabagaError; +use thiserror::Error; +pub use virtio_bindings::virtio_gpu::{ + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE as VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_CTX_CREATE as VIRTIO_GPU_CMD_CTX_CREATE, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_CTX_DESTROY as VIRTIO_GPU_CMD_CTX_DESTROY, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE as VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_GET_CAPSET as VIRTIO_GPU_CMD_GET_CAPSET, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_GET_CAPSET_INFO as VIRTIO_GPU_CMD_GET_CAPSET_INFO, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_GET_DISPLAY_INFO as VIRTIO_GPU_CMD_GET_DISPLAY_INFO, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_GET_EDID as VIRTIO_GPU_CMD_GET_EDID, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_MOVE_CURSOR as VIRTIO_GPU_CMD_MOVE_CURSOR, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID as VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID, + 
virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING as VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_CREATE_2D as VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_CREATE_3D as VIRTIO_GPU_CMD_RESOURCE_CREATE_3D, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB as VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING as VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_FLUSH as VIRTIO_GPU_CMD_RESOURCE_FLUSH, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB as VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB as VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_UNREF as VIRTIO_GPU_CMD_RESOURCE_UNREF, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_SET_SCANOUT as VIRTIO_GPU_CMD_SET_SCANOUT, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_SET_SCANOUT_BLOB as VIRTIO_GPU_CMD_SET_SCANOUT_BLOB, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_SUBMIT_3D as VIRTIO_GPU_CMD_SUBMIT_3D, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D as VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D as VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D as VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_UPDATE_CURSOR as VIRTIO_GPU_CMD_UPDATE_CURSOR, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID as VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER as VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID as VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID as VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY as 
VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_ERR_UNSPEC as VIRTIO_GPU_RESP_ERR_UNSPEC, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_CAPSET as VIRTIO_GPU_RESP_OK_CAPSET, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_CAPSET_INFO as VIRTIO_GPU_RESP_OK_CAPSET_INFO, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_DISPLAY_INFO as VIRTIO_GPU_RESP_OK_DISPLAY_INFO, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_EDID as VIRTIO_GPU_RESP_OK_EDID, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_MAP_INFO as VIRTIO_GPU_RESP_OK_MAP_INFO, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_NODATA as VIRTIO_GPU_RESP_OK_NODATA, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_RESOURCE_UUID as VIRTIO_GPU_RESP_OK_RESOURCE_UUID, +}; +use virtio_queue::{Reader, Writer}; +use vm_memory::{ByteValued, GuestAddress, Le32}; +use zerocopy::{AsBytes, FromBytes}; + +use crate::device::{self, Error}; + +pub const QUEUE_SIZE: usize = 1024; +pub const NUM_QUEUES: usize = 2; + +pub const CONTROL_QUEUE: u16 = 0; +pub const CURSOR_QUEUE: u16 = 1; +pub const POLL_EVENT: u16 = NUM_QUEUES as u16 + 1; + +pub const VIRTIO_GPU_MAX_SCANOUTS: usize = 16; + +/* CHROMIUM(b/277982577): success responses */ +pub const VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO: u32 = 0x11FF; + +/* Create a OS-specific handle from guest memory (not upstreamed). 
*/ +pub const VIRTIO_GPU_BLOB_FLAG_CREATE_GUEST_HANDLE: u32 = 0x0008; + +pub const VIRTIO_GPU_FLAG_FENCE: u32 = 1 << 0; +pub const VIRTIO_GPU_FLAG_INFO_RING_IDX: u32 = 1 << 1; + +/// Virtio Gpu Configuration +#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)] +#[repr(C)] +pub struct VirtioGpuConfig { + /// Signals pending events to the driver + pub events_read: Le32, + /// Clears pending events in the device + pub events_clear: Le32, + /// Maximum number of scanouts supported by the device + pub num_scanouts: Le32, + /// Maximum number of capability sets supported by the device + pub num_capsets: Le32, +} + +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for VirtioGpuConfig {} + +#[derive(Debug, PartialEq, Eq)] +pub struct InvalidCommandType(u32); + +impl std::fmt::Display for InvalidCommandType { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(fmt, "Invalid command type {}", self.0) + } +} + +impl From for crate::device::Error { + fn from(val: InvalidCommandType) -> Self { + Self::InvalidCommandType(val.0) + } +} + +impl std::error::Error for InvalidCommandType {} + +#[derive(Copy, Clone, Debug, Default, AsBytes, FromBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_ctrl_hdr { + pub type_: u32, + pub flags: u32, + pub fence_id: u64, + pub ctx_id: u32, + pub ring_idx: u8, + pub padding: [u8; 3], +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_ctrl_hdr {} + +/* data passed in the cursor vq */ + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_cursor_pos { + pub scanout_id: u32, + pub x: u32, + pub y: u32, + pub padding: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. 
+unsafe impl ByteValued for virtio_gpu_cursor_pos {} + +/* VIRTIO_GPU_CMD_UPDATE_CURSOR, VIRTIO_GPU_CMD_MOVE_CURSOR */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_update_cursor { + pub pos: virtio_gpu_cursor_pos, /* update & move */ + pub resource_id: u32, /* update only */ + pub hot_x: u32, /* update only */ + pub hot_y: u32, /* update only */ + pub padding: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_update_cursor {} + +/* data passed in the control vq, 2d related */ + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_rect { + pub x: u32, + pub y: u32, + pub width: u32, + pub height: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_rect {} + +/* VIRTIO_GPU_CMD_GET_EDID */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_get_edid { + pub scanout: u32, + pub padding: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_get_edid {} + +/* VIRTIO_GPU_CMD_RESOURCE_UNREF */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_unref { + pub resource_id: u32, + pub padding: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. 
+unsafe impl ByteValued for virtio_gpu_resource_unref {} + +/* VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: create a 2d resource with a format */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_create_2d { + pub resource_id: u32, + pub format: u32, + pub width: u32, + pub height: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_resource_create_2d {} + +/* VIRTIO_GPU_CMD_SET_SCANOUT */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_set_scanout { + pub r: virtio_gpu_rect, + pub scanout_id: u32, + pub resource_id: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_set_scanout {} + +/* VIRTIO_GPU_CMD_RESOURCE_FLUSH */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_flush { + pub r: virtio_gpu_rect, + pub resource_id: u32, + pub padding: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_resource_flush {} + +/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: simple transfer to_host */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_transfer_to_host_2d { + pub r: virtio_gpu_rect, + pub offset: u64, + pub resource_id: u32, + pub padding: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. 
+unsafe impl ByteValued for virtio_gpu_transfer_to_host_2d {} + +#[derive(Copy, Clone, Debug, Default, AsBytes, FromBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_mem_entry { + pub addr: u64, + pub length: u32, + pub padding: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_mem_entry {} + +/* VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_attach_backing { + pub resource_id: u32, + pub nr_entries: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_resource_attach_backing {} + +/* VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_detach_backing { + pub resource_id: u32, + pub padding: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_resource_detach_backing {} + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_display_one { + pub r: virtio_gpu_rect, + pub enabled: u32, + pub flags: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_display_one {} + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes)] +#[repr(C)] +pub struct virtio_gpu_resp_display_info { + pub hdr: virtio_gpu_ctrl_hdr, + pub pmodes: [virtio_gpu_display_one; VIRTIO_GPU_MAX_SCANOUTS], +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. 
+unsafe impl ByteValued for virtio_gpu_resp_display_info {} + +const EDID_BLOB_MAX_SIZE: usize = 1024; + +#[derive(Debug, Copy, Clone)] +#[repr(C)] +pub struct virtio_gpu_resp_edid { + pub hdr: virtio_gpu_ctrl_hdr, + pub size: u32, + pub padding: u32, + pub edid: [u8; EDID_BLOB_MAX_SIZE], +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_resp_edid {} + +/* data passed in the control vq, 3d related */ + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_box { + pub x: u32, + pub y: u32, + pub z: u32, + pub w: u32, + pub h: u32, + pub d: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_box {} + +/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_transfer_host_3d { + pub box_: virtio_gpu_box, + pub offset: u64, + pub resource_id: u32, + pub level: u32, + pub stride: u32, + pub layer_stride: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_transfer_host_3d {} + +/* VIRTIO_GPU_CMD_RESOURCE_CREATE_3D */ +pub const VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP: u32 = 1 << 0; +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_create_3d { + pub resource_id: u32, + pub target: u32, + pub format: u32, + pub bind: u32, + pub width: u32, + pub height: u32, + pub depth: u32, + pub array_size: u32, + pub last_level: u32, + pub nr_samples: u32, + pub flags: u32, + pub padding: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. 
+unsafe impl ByteValued for virtio_gpu_resource_create_3d {} + +/* VIRTIO_GPU_CMD_CTX_CREATE */ +pub const VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK: u32 = 1 << 0; +#[derive(Copy, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_ctx_create { + pub nlen: u32, + pub context_init: u32, + pub debug_name: [u8; 64], +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_ctx_create {} + +impl Default for virtio_gpu_ctx_create { + fn default() -> Self { + // SAFETY: trivially safe + unsafe { ::std::mem::zeroed() } + } +} + +impl Clone for virtio_gpu_ctx_create { + fn clone(&self) -> virtio_gpu_ctx_create { + *self + } +} + +impl fmt::Debug for virtio_gpu_ctx_create { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let debug_name = CStr::from_bytes_with_nul(&self.debug_name[..min(64, self.nlen as usize)]) + .map_or_else( + |err| format!("Err({})", err), + |c_str| c_str.to_string_lossy().into_owned(), + ); + f.debug_struct(stringify!("virtio_gpu_ctx_create")) + .field("debug_name", &debug_name) + .field("context_init", &self.context_init) + .finish() + } +} + +/* VIRTIO_GPU_CMD_CTX_DESTROY */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_ctx_destroy {} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_ctx_destroy {} + +/* VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_ctx_resource { + pub resource_id: u32, + pub padding: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. 
+unsafe impl ByteValued for virtio_gpu_ctx_resource {} + +/* VIRTIO_GPU_CMD_SUBMIT_3D */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_cmd_submit { + pub size: u32, + + // The in-fence IDs are prepended to the cmd_buf and memory layout + // of the VIRTIO_GPU_CMD_SUBMIT_3D buffer looks like this: + // _________________ + // | CMD_SUBMIT_3D | + // ----------------- + // | header | + // | in-fence IDs | + // | cmd_buf | + // ----------------- + // + // This makes in-fence IDs naturally aligned to the sizeof(u64) inside + // of the virtio buffer. + pub num_in_fences: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_cmd_submit {} + +pub const VIRTIO_GPU_CAPSET_VIRGL: u32 = 1; +pub const VIRTIO_GPU_CAPSET_VIRGL2: u32 = 2; +pub const VIRTIO_GPU_CAPSET_GFXSTREAM: u32 = 3; +pub const VIRTIO_GPU_CAPSET_VENUS: u32 = 4; +pub const VIRTIO_GPU_CAPSET_CROSS_DOMAIN: u32 = 5; + +/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_get_capset_info { + pub capset_index: u32, + pub padding: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_get_capset_info {} + +/* VIRTIO_GPU_RESP_OK_CAPSET_INFO */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes)] +#[repr(C)] +pub struct virtio_gpu_resp_capset_info { + pub hdr: virtio_gpu_ctrl_hdr, + pub capset_id: u32, + pub capset_max_version: u32, + pub capset_max_size: u32, + pub padding: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. 
+unsafe impl ByteValued for virtio_gpu_resp_capset_info {} + +/* VIRTIO_GPU_CMD_GET_CAPSET */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_get_capset { + pub capset_id: u32, + pub capset_version: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_get_capset {} + +/* VIRTIO_GPU_RESP_OK_CAPSET */ +#[derive(Copy, Clone, Debug, Default)] +#[repr(C)] +pub struct virtio_gpu_resp_capset { + pub hdr: virtio_gpu_ctrl_hdr, + pub capset_data: PhantomData<[u8]>, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_resp_capset {} + +/* VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes)] +#[repr(C)] +pub struct virtio_gpu_resp_resource_plane_info { + pub hdr: virtio_gpu_ctrl_hdr, + pub count: u32, + pub padding: u32, + pub format_modifier: u64, + pub strides: [u32; 4], + pub offsets: [u32; 4], +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_resp_resource_plane_info {} + +pub const PLANE_INFO_MAX_COUNT: usize = 4; + +pub const VIRTIO_GPU_EVENT_DISPLAY: u32 = 1 << 0; + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_create_blob { + pub resource_id: u32, + pub blob_mem: u32, + pub blob_flags: u32, + pub nr_entries: u32, + pub blob_id: u64, + pub size: u64, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. 
+unsafe impl ByteValued for virtio_gpu_resource_create_blob {} + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_map_blob { + pub resource_id: u32, + pub padding: u32, + pub offset: u64, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_resource_map_blob {} + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_unmap_blob { + pub resource_id: u32, + pub padding: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_resource_unmap_blob {} + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resp_map_info { + pub hdr: virtio_gpu_ctrl_hdr, + pub map_info: u32, + pub padding: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_resp_map_info {} + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_assign_uuid { + pub resource_id: u32, + pub padding: u32, +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_resource_assign_uuid {} + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes)] +#[repr(C)] +pub struct virtio_gpu_resp_resource_uuid { + pub hdr: virtio_gpu_ctrl_hdr, + pub uuid: [u8; 16], +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. 
+unsafe impl ByteValued for virtio_gpu_resp_resource_uuid {} + +/* VIRTIO_GPU_CMD_SET_SCANOUT_BLOB */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_set_scanout_blob { + pub r: virtio_gpu_rect, + pub scanout_id: u32, + pub resource_id: u32, + pub width: u32, + pub height: u32, + pub format: u32, + pub padding: u32, + pub strides: [u32; 4], + pub offsets: [u32; 4], +} +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. +unsafe impl ByteValued for virtio_gpu_set_scanout_blob {} + +/* simple formats for fbcon/X use */ +pub const VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM: u32 = 1; +pub const VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM: u32 = 2; +pub const VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM: u32 = 3; +pub const VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM: u32 = 4; +pub const VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM: u32 = 67; +pub const VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM: u32 = 68; +pub const VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM: u32 = 121; +pub const VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM: u32 = 134; + +/// A virtio gpu command and associated metadata specific to each command. 
+#[derive(Clone, PartialEq, Eq)] +pub enum GpuCommand { + GetDisplayInfo, + GetEdid(virtio_gpu_get_edid), + ResourceCreate2d(virtio_gpu_resource_create_2d), + ResourceUnref(virtio_gpu_resource_unref), + SetScanout(virtio_gpu_set_scanout), + SetScanoutBlob(virtio_gpu_set_scanout_blob), + ResourceFlush(virtio_gpu_resource_flush), + TransferToHost2d(virtio_gpu_transfer_to_host_2d), + ResourceAttachBacking( + virtio_gpu_resource_attach_backing, + Vec<(GuestAddress, usize)>, + ), + ResourceDetachBacking(virtio_gpu_resource_detach_backing), + GetCapsetInfo(virtio_gpu_get_capset_info), + GetCapset(virtio_gpu_get_capset), + CtxCreate(virtio_gpu_ctx_create), + CtxDestroy(virtio_gpu_ctx_destroy), + CtxAttachResource(virtio_gpu_ctx_resource), + CtxDetachResource(virtio_gpu_ctx_resource), + ResourceCreate3d(virtio_gpu_resource_create_3d), + TransferToHost3d(virtio_gpu_transfer_host_3d), + TransferFromHost3d(virtio_gpu_transfer_host_3d), + CmdSubmit3d { + cmd_data: Vec, + fence_ids: Vec, + }, + ResourceCreateBlob(virtio_gpu_resource_create_blob), + ResourceMapBlob(virtio_gpu_resource_map_blob), + ResourceUnmapBlob(virtio_gpu_resource_unmap_blob), + UpdateCursor(virtio_gpu_update_cursor), + MoveCursor(virtio_gpu_update_cursor), + ResourceAssignUuid(virtio_gpu_resource_assign_uuid), +} + +/// An error indicating something went wrong decoding a `GpuCommand`. These correspond to +/// `VIRTIO_GPU_CMD_*`. +#[derive(Error, Debug)] +pub enum GpuCommandDecodeError { + /// The type of the command was invalid. + #[error("invalid command type ({0})")] + InvalidType(u32), + /// An I/O error occurred. 
+ #[error("an I/O error occurred: {0}")] + IO(io::Error), + #[error("Descriptor read failed")] + DescriptorReadFailed, +} + +impl From for GpuCommandDecodeError { + fn from(e: io::Error) -> GpuCommandDecodeError { + GpuCommandDecodeError::IO(e) + } +} + +impl From for GpuCommandDecodeError { + fn from(_: device::Error) -> Self { + GpuCommandDecodeError::DescriptorReadFailed + } +} + +impl From for GpuResponseEncodeError { + fn from(_: device::Error) -> Self { + GpuResponseEncodeError::DescriptorWriteFailed + } +} + +impl fmt::Debug for GpuCommand { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::GpuCommand::*; + match self { + GetDisplayInfo => f.debug_struct("GetDisplayInfo").finish(), + GetEdid(_info) => f.debug_struct("GetEdid").finish(), + ResourceCreate2d(_info) => f.debug_struct("ResourceCreate2d").finish(), + ResourceUnref(_info) => f.debug_struct("ResourceUnref").finish(), + SetScanout(_info) => f.debug_struct("SetScanout").finish(), + SetScanoutBlob(_info) => f.debug_struct("SetScanoutBlob").finish(), + ResourceFlush(_info) => f.debug_struct("ResourceFlush").finish(), + TransferToHost2d(_info) => f.debug_struct("TransferToHost2d").finish(), + ResourceAttachBacking(_info, _vecs) => f.debug_struct("ResourceAttachBacking").finish(), + ResourceDetachBacking(_info) => f.debug_struct("ResourceDetachBacking").finish(), + GetCapsetInfo(_info) => f.debug_struct("GetCapsetInfo").finish(), + GetCapset(_info) => f.debug_struct("GetCapset").finish(), + CtxCreate(_info) => f.debug_struct("CtxCreate").finish(), + CtxDestroy(_info) => f.debug_struct("CtxDestroy").finish(), + CtxAttachResource(_info) => f.debug_struct("CtxAttachResource").finish(), + CtxDetachResource(_info) => f.debug_struct("CtxDetachResource").finish(), + ResourceCreate3d(_info) => f.debug_struct("ResourceCreate3d").finish(), + TransferToHost3d(_info) => f.debug_struct("TransferToHost3d").finish(), + TransferFromHost3d(_info) => f.debug_struct("TransferFromHost3d").finish(), + 
CmdSubmit3d { .. } => f.debug_struct("CmdSubmit3d").finish(), + ResourceCreateBlob(_info) => f.debug_struct("ResourceCreateBlob").finish(), + ResourceMapBlob(_info) => f.debug_struct("ResourceMapBlob").finish(), + ResourceUnmapBlob(_info) => f.debug_struct("ResourceUnmapBlob").finish(), + UpdateCursor(_info) => f.debug_struct("UpdateCursor").finish(), + MoveCursor(_info) => f.debug_struct("MoveCursor").finish(), + ResourceAssignUuid(_info) => f.debug_struct("ResourceAssignUuid").finish(), + } + } +} + +impl GpuCommand { + /// Decodes a command from the given chunk of memory. + pub fn decode( + reader: &mut Reader, + ) -> Result<(virtio_gpu_ctrl_hdr, GpuCommand), GpuCommandDecodeError> { + use self::GpuCommand::*; + let hdr = reader + .read_obj::() + .map_err(|_| Error::DescriptorReadFailed)?; + trace!("Decoding GpuCommand 0x{:0x}", hdr.type_); + let cmd = match hdr.type_ { + VIRTIO_GPU_CMD_GET_DISPLAY_INFO => GetDisplayInfo, + VIRTIO_GPU_CMD_GET_EDID => { + GetEdid(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_RESOURCE_CREATE_2D => { + ResourceCreate2d(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_RESOURCE_UNREF => { + ResourceUnref(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_SET_SCANOUT => { + SetScanout(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_SET_SCANOUT_BLOB => { + SetScanoutBlob(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_RESOURCE_FLUSH => { + ResourceFlush(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D => { + TransferToHost2d(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) 
+ } + VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING => { + let info: virtio_gpu_resource_attach_backing = + reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?; + let mut entries = Vec::with_capacity(info.nr_entries as usize); + for _ in 0..info.nr_entries { + let entry: virtio_gpu_mem_entry = + reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?; + entries.push((GuestAddress(entry.addr), entry.length as usize)) + } + ResourceAttachBacking(info, entries) + } + VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING => { + ResourceDetachBacking(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_GET_CAPSET_INFO => { + GetCapsetInfo(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_GET_CAPSET => { + GetCapset(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_CTX_CREATE => { + CtxCreate(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_CTX_DESTROY => { + CtxDestroy(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE => { + CtxAttachResource(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE => { + CtxDetachResource(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_RESOURCE_CREATE_3D => { + ResourceCreate3d(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D => { + TransferToHost3d(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D => { + TransferFromHost3d(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) 
+            }
+            VIRTIO_GPU_CMD_SUBMIT_3D => {
+                let info: virtio_gpu_cmd_submit =
+                    reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?;
+
+                let mut cmd_data = vec![0; info.size as usize];
+                let mut fence_ids: Vec<u64> = Vec::with_capacity(info.num_in_fences as usize);
+
+                for _ in 0..info.num_in_fences {
+                    let fence_id = reader
+                        .read_obj::<u64>()
+                        .map_err(|_| Error::DescriptorReadFailed)?;
+                    fence_ids.push(fence_id);
+                }
+
+                reader
+                    .read_exact(&mut cmd_data[..])
+                    .map_err(|_| Error::DescriptorReadFailed)?;
+
+                CmdSubmit3d {
+                    cmd_data,
+                    fence_ids,
+                }
+            }
+            VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB => {
+                ResourceCreateBlob(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB => {
+                ResourceMapBlob(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB => {
+                ResourceUnmapBlob(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            VIRTIO_GPU_CMD_UPDATE_CURSOR => {
+                UpdateCursor(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            VIRTIO_GPU_CMD_MOVE_CURSOR => {
+                MoveCursor(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID => {
+                ResourceAssignUuid(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?)
+            }
+            _ => return Err(GpuCommandDecodeError::InvalidType(hdr.type_)),
+        };
+
+        Ok((hdr, cmd))
+    }
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub struct GpuResponsePlaneInfo {
+    pub stride: u32,
+    pub offset: u32,
+}
+
+/// A response to a `GpuCommand`. These correspond to `VIRTIO_GPU_RESP_*`.
+#[derive(Debug)]
+pub enum GpuResponse {
+    OkNoData,
+    OkDisplayInfo(Vec<(u32, u32, bool)>),
+    OkEdid {
+        /// The EDID display data blob (as specified by VESA)
+        blob: Box<[u8]>,
+    },
+    OkCapsetInfo {
+        capset_id: u32,
+        version: u32,
+        size: u32,
+    },
+    OkCapset(Vec<u8>),
+    OkResourcePlaneInfo {
+        format_modifier: u64,
+        plane_info: Vec<GpuResponsePlaneInfo>,
+    },
+    OkResourceUuid {
+        uuid: [u8; 16],
+    },
+    OkMapInfo {
+        map_info: u32,
+    },
+    ErrUnspec,
+    ErrRutabaga(RutabagaError),
+    ErrScanout {
+        num_scanouts: u32,
+    },
+    ErrOutOfMemory,
+    ErrInvalidScanoutId,
+    ErrInvalidResourceId,
+    ErrInvalidContextId,
+    ErrInvalidParameter,
+}
+
+impl From<RutabagaError> for GpuResponse {
+    fn from(e: RutabagaError) -> GpuResponse {
+        GpuResponse::ErrRutabaga(e)
+    }
+}
+
+impl Display for GpuResponse {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        use self::GpuResponse::*;
+        match self {
+            ErrRutabaga(e) => write!(f, "renderer error: {}", e),
+            ErrScanout { num_scanouts } => write!(f, "non-zero scanout: {}", num_scanouts),
+            _ => Ok(()),
+        }
+    }
+}
+
+/// An error indicating something went wrong encoding a `GpuResponse`.
+#[derive(Error, Debug)]
+pub enum GpuResponseEncodeError {
+    /// An I/O error occurred.
+    #[error("an I/O error occurred: {0}")]
+    IO(io::Error),
+    /// More displays than are valid were in a `OkDisplayInfo`.
+    #[error("{0} is more displays than are valid")]
+    TooManyDisplays(usize),
+    /// More planes than are valid were in a `OkResourcePlaneInfo`.
+    #[error("{0} is more planes than are valid")]
+    TooManyPlanes(usize),
+    #[error("Descriptor write failed")]
+    DescriptorWriteFailed,
+}
+
+impl From<io::Error> for GpuResponseEncodeError {
+    fn from(e: io::Error) -> GpuResponseEncodeError {
+        GpuResponseEncodeError::IO(e)
+    }
+}
+
+pub type VirtioGpuResult = std::result::Result<GpuResponse, GpuResponse>;
+
+impl GpuResponse {
+    /// Encodes this `GpuResponse` into `resp` and the given set of metadata.
+    pub fn encode(
+        &self,
+        flags: u32,
+        fence_id: u64,
+        ctx_id: u32,
+        ring_idx: u8,
+        writer: &mut Writer,
+    ) -> Result<u32, GpuResponseEncodeError> {
+        let hdr = virtio_gpu_ctrl_hdr {
+            type_: self.get_type(),
+            flags,
+            fence_id,
+            ctx_id,
+            ring_idx,
+            padding: Default::default(),
+        };
+        let len = match *self {
+            GpuResponse::OkDisplayInfo(ref info) => {
+                if info.len() > VIRTIO_GPU_MAX_SCANOUTS {
+                    return Err(GpuResponseEncodeError::TooManyDisplays(info.len()));
+                }
+                let mut disp_info = virtio_gpu_resp_display_info {
+                    hdr,
+                    pmodes: Default::default(),
+                };
+                for (disp_mode, &(width, height, enabled)) in disp_info.pmodes.iter_mut().zip(info)
+                {
+                    disp_mode.r.width = width;
+                    disp_mode.r.height = height;
+                    disp_mode.enabled = enabled as u32;
+                }
+                writer
+                    .write_obj(disp_info)
+                    .map_err(|_| Error::DescriptorWriteFailed)?;
+                size_of_val(&disp_info)
+            }
+            GpuResponse::OkEdid { ref blob } => {
+                let mut edid_info = virtio_gpu_resp_edid {
+                    hdr,
+                    size: blob.len() as u32,
+                    edid: [0; EDID_BLOB_MAX_SIZE],
+                    padding: Default::default(),
+                };
+                edid_info.edid.copy_from_slice(blob);
+                writer
+                    .write_obj(edid_info)
+                    .map_err(|_| Error::DescriptorWriteFailed)?;
+                size_of_val(&edid_info)
+            }
+            GpuResponse::OkCapsetInfo {
+                capset_id,
+                version,
+                size,
+            } => {
+                writer
+                    .write_obj(virtio_gpu_resp_capset_info {
+                        hdr,
+                        capset_id,
+                        capset_max_version: version,
+                        capset_max_size: size,
+                        padding: 0u32,
+                    })
+                    .map_err(|_| Error::DescriptorWriteFailed)?;
+                size_of::<virtio_gpu_resp_capset_info>()
+            }
+            GpuResponse::OkCapset(ref data) => {
+                writer
+                    .write_obj(hdr)
+                    .map_err(|_| Error::DescriptorWriteFailed)?;
+                writer
+                    .write(data)
+                    .map_err(|_| Error::DescriptorWriteFailed)?;
+                size_of_val(&hdr) + data.len()
+            }
+            GpuResponse::OkResourcePlaneInfo {
+                format_modifier,
+                ref plane_info,
+            } => {
+                if plane_info.len() > PLANE_INFO_MAX_COUNT {
+                    return Err(GpuResponseEncodeError::TooManyPlanes(plane_info.len()));
+                }
+                let mut strides = [u32::default(); PLANE_INFO_MAX_COUNT];
+                let mut offsets = [u32::default();
PLANE_INFO_MAX_COUNT]; + for (plane_index, plane) in plane_info.iter().enumerate() { + strides[plane_index] = plane.stride; + offsets[plane_index] = plane.offset; + } + let plane_info = virtio_gpu_resp_resource_plane_info { + hdr, + count: plane_info.len() as u32, + padding: 0u32, + format_modifier, + strides, + offsets, + }; + if writer.available_bytes() >= size_of_val(&plane_info) { + size_of_val(&plane_info) + } else { + // In case there is too little room in the response slice to store the + // entire virtio_gpu_resp_resource_plane_info, convert response to a regular + // VIRTIO_GPU_RESP_OK_NODATA and attempt to return that. + writer + .write_obj(virtio_gpu_ctrl_hdr { + type_: VIRTIO_GPU_RESP_OK_NODATA, + ..hdr + }) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of_val(&hdr) + } + } + GpuResponse::OkResourceUuid { uuid } => { + let resp_info = virtio_gpu_resp_resource_uuid { hdr, uuid }; + + writer + .write_obj(resp_info) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of_val(&resp_info) + } + GpuResponse::OkMapInfo { map_info } => { + let resp_info = virtio_gpu_resp_map_info { + hdr, + map_info, + padding: Default::default(), + }; + + writer + .write_obj(resp_info) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of_val(&resp_info) + } + _ => { + writer + .write_obj(hdr) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of_val(&hdr) + } + }; + Ok(len as u32) + } + + /// Gets the `VIRTIO_GPU_*` enum value that corresponds to this variant. + pub fn get_type(&self) -> u32 { + match self { + GpuResponse::OkNoData => VIRTIO_GPU_RESP_OK_NODATA, + GpuResponse::OkDisplayInfo(_) => VIRTIO_GPU_RESP_OK_DISPLAY_INFO, + GpuResponse::OkEdid { .. } => VIRTIO_GPU_RESP_OK_EDID, + GpuResponse::OkCapsetInfo { .. } => VIRTIO_GPU_RESP_OK_CAPSET_INFO, + GpuResponse::OkCapset(_) => VIRTIO_GPU_RESP_OK_CAPSET, + GpuResponse::OkResourcePlaneInfo { .. } => VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO, + GpuResponse::OkResourceUuid { .. 
} => VIRTIO_GPU_RESP_OK_RESOURCE_UUID,
+            GpuResponse::OkMapInfo { .. } => VIRTIO_GPU_RESP_OK_MAP_INFO,
+            GpuResponse::ErrUnspec => VIRTIO_GPU_RESP_ERR_UNSPEC,
+            GpuResponse::ErrRutabaga(_) => VIRTIO_GPU_RESP_ERR_UNSPEC,
+            GpuResponse::ErrScanout { num_scanouts: _ } => VIRTIO_GPU_RESP_ERR_UNSPEC,
+            GpuResponse::ErrOutOfMemory => VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY,
+            GpuResponse::ErrInvalidScanoutId => VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID,
+            GpuResponse::ErrInvalidResourceId => VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID,
+            GpuResponse::ErrInvalidContextId => VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID,
+            GpuResponse::ErrInvalidParameter => VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use virtio_bindings::virtio_ring::VRING_DESC_F_WRITE;
+    use virtio_queue::mock::MockSplitQueue;
+    use virtio_queue::Descriptor;
+    use vm_memory::GuestMemoryMmap;
+
+    #[test]
+    fn test_virtio_gpu_config() {
+        // Test VirtioGpuConfig size
+        assert_eq!(std::mem::size_of::<VirtioGpuConfig>(), 16);
+    }
+
+    #[test]
+    fn test_invalid_command_type_display() {
+        let error = InvalidCommandType(42);
+        assert_eq!(format!("{}", error), "Invalid command type 42");
+    }
+
+    #[test]
+    fn test_gpu_response_display() {
+        let err_rutabaga = GpuResponse::ErrRutabaga(RutabagaError::InvalidContextId);
+        assert_eq!(
+            format!("{}", err_rutabaga),
+            "renderer error: invalid context id"
+        );
+
+        let err_scanout = GpuResponse::ErrScanout { num_scanouts: 3 };
+        assert_eq!(format!("{}", err_scanout), "non-zero scanout: 3");
+    }
+
+    #[test]
+    fn test_invalid_type_error() {
+        let error = GpuCommandDecodeError::InvalidType(42);
+        assert_eq!(format!("{}", error), "invalid command type (42)");
+    }
+
+    // Test io_error conversion to gpu command decode error
+    #[test]
+    fn test_io_error() {
+        let io_error = io::Error::new(io::ErrorKind::Other, "Test IO error");
+        let gpu_error: GpuCommandDecodeError = io_error.into();
+        match gpu_error {
+            GpuCommandDecodeError::IO(_) => (),
+            _ =>
panic!("Expected IO error"), + } + } + + //Test vhu_error conversion to gpu command decode/encode error + #[test] + fn test_device_error() { + let device_error = device::Error::DescriptorReadFailed; + let gpu_error: GpuCommandDecodeError = device_error.into(); + match gpu_error { + GpuCommandDecodeError::DescriptorReadFailed => (), + _ => panic!("Expected DescriptorReadFailed error"), + } + let device_error = device::Error::DescriptorWriteFailed; + let gpu_error: GpuResponseEncodeError = device_error.into(); + match gpu_error { + GpuResponseEncodeError::DescriptorWriteFailed => (), + _ => panic!("Expected DescriptorWriteFailed error"), + } + } + + #[test] + fn test_debug() { + let get_display_info = GpuCommand::GetDisplayInfo; + let get_edid = GpuCommand::GetEdid(virtio_gpu_get_edid::default()); + let resource_create_2d = + GpuCommand::ResourceCreate2d(virtio_gpu_resource_create_2d::default()); + let resource_unref = GpuCommand::ResourceUnref(virtio_gpu_resource_unref::default()); + let set_scanout = GpuCommand::SetScanout(virtio_gpu_set_scanout::default()); + let set_scanout_blob = GpuCommand::SetScanoutBlob(virtio_gpu_set_scanout_blob::default()); + let resource_flush = GpuCommand::ResourceFlush(virtio_gpu_resource_flush::default()); + let transfer_to_host_2d = + GpuCommand::TransferToHost2d(virtio_gpu_transfer_to_host_2d::default()); + //let resource_attach_backing = GpuCommand::ResourceAttachBacking(virtio_gpu_resource_attach_backing::default(), vec![1]); + let resource_detach_backing = + GpuCommand::ResourceDetachBacking(virtio_gpu_resource_detach_backing::default()); + let get_capset_info = GpuCommand::GetCapsetInfo(virtio_gpu_get_capset_info::default()); + let get_capset = GpuCommand::GetCapset(virtio_gpu_get_capset::default()); + let ctx_create = GpuCommand::CtxCreate(virtio_gpu_ctx_create::default()); + let ctx_destroy = GpuCommand::CtxDestroy(virtio_gpu_ctx_destroy::default()); + let ctx_attach_resource = 
GpuCommand::CtxAttachResource(virtio_gpu_ctx_resource::default()); + let ctx_detach_resource = GpuCommand::CtxDetachResource(virtio_gpu_ctx_resource::default()); + let resource_create_3d = + GpuCommand::ResourceCreate3d(virtio_gpu_resource_create_3d::default()); + let transfer_to_host_3d = + GpuCommand::TransferToHost3d(virtio_gpu_transfer_host_3d::default()); + let transfer_from_host_3d = + GpuCommand::TransferFromHost3d(virtio_gpu_transfer_host_3d::default()); + let cmd_submit_3d = GpuCommand::CmdSubmit3d { + cmd_data: Vec::new(), + fence_ids: Vec::new(), + }; + let resource_create_blob = + GpuCommand::ResourceCreateBlob(virtio_gpu_resource_create_blob::default()); + let resource_map_blob = + GpuCommand::ResourceMapBlob(virtio_gpu_resource_map_blob::default()); + let resource_unmap_blob = + GpuCommand::ResourceUnmapBlob(virtio_gpu_resource_unmap_blob::default()); + let update_cursor = GpuCommand::UpdateCursor(virtio_gpu_update_cursor::default()); + let move_cursor = GpuCommand::MoveCursor(virtio_gpu_update_cursor::default()); + let resource_assign_uuid = + GpuCommand::ResourceAssignUuid(virtio_gpu_resource_assign_uuid::default()); + + let expected_debug_output_display = "GetDisplayInfo"; + let expected_debug_output_edid = "GetEdid"; + let expected_debug_output_create2d = "ResourceCreate2d"; + let expected_debug_output_unref = "ResourceUnref"; + let expected_debug_output_scanout = "SetScanout"; + let expected_debug_output_scanout_blob = "SetScanoutBlob"; + let expected_debug_output_flush = "ResourceFlush"; + let expected_debug_output_transfer_to_host_2d = "TransferToHost2d"; + let expected_debug_output_detach_backing = "ResourceDetachBacking"; + let expected_debug_output_get_capset_info = "GetCapsetInfo"; + let expected_debug_output_get_capset = "GetCapset"; + let expected_debug_output_ctx_create = "CtxCreate"; + let expected_debug_output_ctx_destroy = "CtxDestroy"; + let expected_debug_output_ctx_attach_resource = "CtxAttachResource"; + let 
expected_debug_output_ctx_detach_resource = "CtxDetachResource"; + let expected_debug_output_resource_create_3d = "ResourceCreate3d"; + let expected_debug_output_transfer_to_host_3d = "TransferToHost3d"; + let expected_debug_output_transfer_from_host_3d = "TransferFromHost3d"; + let expected_debug_output_cmd_submit_3d = "CmdSubmit3d"; + let expected_debug_output_create_blob = "ResourceCreateBlob"; + let expected_debug_output_map_blob = "ResourceMapBlob"; + let expected_debug_output_unmap_blob = "ResourceUnmapBlob"; + let expected_debug_output_update_cursor = "UpdateCursor"; + let expected_debug_output_move_cursor = "MoveCursor"; + let expected_debug_output_assign_uuid = "ResourceAssignUuid"; + + assert_eq!( + format!("{:?}", get_display_info), + expected_debug_output_display + ); + assert_eq!(format!("{:?}", get_edid), expected_debug_output_edid); + assert_eq!( + format!("{:?}", resource_create_2d), + expected_debug_output_create2d + ); + assert_eq!(format!("{:?}", resource_unref), expected_debug_output_unref); + assert_eq!(format!("{:?}", set_scanout), expected_debug_output_scanout); + assert_eq!( + format!("{:?}", set_scanout_blob), + expected_debug_output_scanout_blob + ); + assert_eq!(format!("{:?}", resource_flush), expected_debug_output_flush); + assert_eq!( + format!("{:?}", transfer_to_host_2d), + expected_debug_output_transfer_to_host_2d + ); + assert_eq!( + format!("{:?}", resource_detach_backing), + expected_debug_output_detach_backing + ); + assert_eq!( + format!("{:?}", get_capset_info), + expected_debug_output_get_capset_info + ); + assert_eq!( + format!("{:?}", get_capset), + expected_debug_output_get_capset + ); + assert_eq!( + format!("{:?}", ctx_create), + expected_debug_output_ctx_create + ); + assert_eq!( + format!("{:?}", ctx_destroy), + expected_debug_output_ctx_destroy + ); + assert_eq!( + format!("{:?}", ctx_attach_resource), + expected_debug_output_ctx_attach_resource + ); + assert_eq!( + format!("{:?}", ctx_detach_resource), + 
expected_debug_output_ctx_detach_resource + ); + assert_eq!( + format!("{:?}", resource_create_3d), + expected_debug_output_resource_create_3d + ); + assert_eq!( + format!("{:?}", transfer_to_host_3d), + expected_debug_output_transfer_to_host_3d + ); + assert_eq!( + format!("{:?}", transfer_from_host_3d), + expected_debug_output_transfer_from_host_3d + ); + assert_eq!( + format!("{:?}", cmd_submit_3d), + expected_debug_output_cmd_submit_3d + ); + assert_eq!( + format!("{:?}", resource_create_blob), + expected_debug_output_create_blob + ); + assert_eq!( + format!("{:?}", resource_map_blob), + expected_debug_output_map_blob + ); + assert_eq!( + format!("{:?}", resource_unmap_blob), + expected_debug_output_unmap_blob + ); + assert_eq!( + format!("{:?}", update_cursor), + expected_debug_output_update_cursor + ); + assert_eq!( + format!("{:?}", move_cursor), + expected_debug_output_move_cursor + ); + assert_eq!( + format!("{:?}", resource_assign_uuid), + expected_debug_output_assign_uuid + ); + + let bytes = b"test_debug\0"; + let original = virtio_gpu_ctx_create { + debug_name: { + let mut debug_name = [0; 64]; + debug_name[..bytes.len()].copy_from_slice(bytes); + debug_name + }, + context_init: 0, + nlen: bytes.len() as u32, + }; + + let debug_string = format!("{:?}", original); + + assert_eq!( + debug_string, + "\"virtio_gpu_ctx_create\" { debug_name: \"test_debug\", context_init: 0 }" + ); + } + + #[test] + fn test_gpu_response_encode() { + let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 16384)]).unwrap(); + + let vq = MockSplitQueue::new(&mem, 8); + let desc_chain = vq + .build_desc_chain(&[Descriptor::new(0x1000, 8192, VRING_DESC_F_WRITE as u16, 0)]) + .unwrap(); + + let mut writer = desc_chain + .clone() + .writer(&mem) + .map_err(Error::CreateWriter) + .unwrap(); + + let resp = GpuResponse::OkNoData; + let resp_ok_nodata = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_ok_nodata, 24); + + let resp = 
GpuResponse::OkDisplayInfo(vec![(0, 0, false)]); + let resp_display_info = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_display_info, 408); + + let edid_data: Box<[u8]> = Box::new([0u8; 1024]); + let resp = GpuResponse::OkEdid { blob: edid_data }; + let resp_edid = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_edid, 1056); + + let resp = GpuResponse::OkCapset(vec![]); + let resp_capset = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_capset, 24); + + let resp = GpuResponse::OkCapsetInfo { + capset_id: 0, + version: 0, + size: 0, + }; + let resp_capset = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_capset, 40); + + let resp = GpuResponse::OkResourcePlaneInfo { + format_modifier: 0, + plane_info: vec![], + }; + let resp_resource_planeinfo = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_resource_planeinfo, 72); + + let resp = GpuResponse::OkResourceUuid { uuid: [0u8; 16] }; + let resp_resource_uuid = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_resource_uuid, 40); + + let resp = GpuResponse::OkMapInfo { map_info: 0 }; + let resp_map_info = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_map_info, 32); + } +} diff --git a/staging/vhost-device-gpu/src/virtio_gpu.rs b/staging/vhost-device-gpu/src/virtio_gpu.rs new file mode 100644 index 00000000..147af590 --- /dev/null +++ b/staging/vhost-device-gpu/src/virtio_gpu.rs @@ -0,0 +1,927 @@ +// Copyright 2024 Red Hat Inc +// Copyright 2019 The ChromiumOS Authors +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +use log::{debug, error, trace}; +use std::{ + collections::BTreeMap, + io::IoSliceMut, + os::fd::FromRawFd, + result::Result, + sync::{Arc, Mutex}, +}; + +use libc::c_void; +use rutabaga_gfx::{ + ResourceCreate3D, ResourceCreateBlob, Rutabaga, RutabagaBuilder, 
RutabagaComponentType,
+    RutabagaFence, RutabagaFenceHandler, RutabagaIntoRawDescriptor, RutabagaIovec, Transfer3D,
+};
+use vhost::vhost_user::{
+    gpu_message::{
+        VhostUserGpuCursorPos, VhostUserGpuCursorUpdate, VhostUserGpuEdidRequest,
+        VhostUserGpuScanout, VhostUserGpuUpdate,
+    },
+    GpuBackend,
+};
+use vhost_user_backend::{VringRwLock, VringT};
+use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, VolatileSlice};
+use vmm_sys_util::eventfd::EventFd;
+
+use crate::protocol::{
+    virtio_gpu_rect, GpuResponse, GpuResponse::*, GpuResponsePlaneInfo, VirtioGpuResult,
+    VIRTIO_GPU_FLAG_INFO_RING_IDX, VIRTIO_GPU_MAX_SCANOUTS,
+};
+use crate::{device::Error, GpuMode};
+
+fn sglist_to_rutabaga_iovecs(
+    vecs: &[(GuestAddress, usize)],
+    mem: &GuestMemoryMmap,
+) -> Result<Vec<RutabagaIovec>, ()> {
+    if vecs
+        .iter()
+        .any(|&(addr, len)| mem.get_slice(addr, len).is_err())
+    {
+        return Err(());
+    }
+
+    let mut rutabaga_iovecs: Vec<RutabagaIovec> = Vec::new();
+    for &(addr, len) in vecs {
+        let slice = mem.get_slice(addr, len).unwrap();
+        rutabaga_iovecs.push(RutabagaIovec {
+            base: slice.ptr_guard_mut().as_ptr() as *mut c_void,
+            len,
+        });
+    }
+    Ok(rutabaga_iovecs)
+}
+
+#[derive(Default, Debug)]
+pub struct Rectangle {
+    pub x: u32,
+    pub y: u32,
+    pub width: u32,
+    pub height: u32,
+}
+
+impl From<virtio_gpu_rect> for Rectangle {
+    fn from(r: virtio_gpu_rect) -> Self {
+        Self {
+            x: r.x,
+            y: r.y,
+            width: r.width,
+            height: r.height,
+        }
+    }
+}
+
+#[cfg_attr(test, mockall::automock)]
+// We need to specify some lifetimes explicitly, for mockall::automock attribute to compile
+#[allow(clippy::needless_lifetimes)]
+pub trait VirtioGpu {
+    /// Uses the hypervisor to unmap the blob resource.
+    fn resource_unmap_blob(&mut self, resource_id: u32) -> VirtioGpuResult;
+
+    /// Uses the hypervisor to map the rutabaga blob resource.
+    ///
+    /// When sandboxing is disabled, external_blob is unset and opaque fds are mapped by
+    /// rutabaga as ExternalMapping.
+ /// When sandboxing is enabled, external_blob is set and opaque fds must be mapped in the + /// hypervisor process by Vulkano using metadata provided by Rutabaga::vulkan_info(). + fn resource_map_blob(&mut self, resource_id: u32, offset: u64) -> VirtioGpuResult; + + /// Creates a blob resource using rutabaga. + fn resource_create_blob( + &mut self, + ctx_id: u32, + resource_id: u32, + resource_create_blob: ResourceCreateBlob, + vecs: Vec<(GuestAddress, usize)>, + mem: &GuestMemoryMmap, + ) -> VirtioGpuResult; + + fn process_fence( + &mut self, + ring: VirtioGpuRing, + fence_id: u64, + desc_index: u16, + len: u32, + ) -> bool; + + /// Creates a fence with the RutabagaFence that can be used to determine when the previous + /// command completed. + fn create_fence(&mut self, rutabaga_fence: RutabagaFence) -> VirtioGpuResult; + + /// Submits a command buffer to a rutabaga context. + fn submit_command( + &mut self, + ctx_id: u32, + commands: &mut [u8], + fence_ids: &[u64], + ) -> VirtioGpuResult; + + /// Detaches a resource from a rutabaga context. + fn context_detach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult; + + /// Attaches a resource to a rutabaga context. + fn context_attach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult; + + /// Destroys a rutabaga context. + fn destroy_context(&mut self, ctx_id: u32) -> VirtioGpuResult; + fn force_ctx_0(&self); + + /// Gets the list of supported display resolutions + fn display_info(&self) -> VirtioGpuResult; + + /// Gets the EDID for the specified scanout ID. If that scanout is not enabled, it would return + /// the EDID of a default display. + fn get_edid(&self, edid_req: VhostUserGpuEdidRequest) -> VirtioGpuResult; + + /// Sets the given resource id as the source of scanout to the display. + fn set_scanout( + &mut self, + scanout_id: u32, + resource_id: u32, + rect: Rectangle, + ) -> VirtioGpuResult; + + /// Creates a 3D resource with the given properties and resource_id. 
+    fn resource_create_3d(
+        &mut self,
+        resource_id: u32,
+        resource_create_3d: ResourceCreate3D,
+    ) -> VirtioGpuResult;
+
+    /// Releases guest kernel reference on the resource.
+    fn unref_resource(&mut self, resource_id: u32) -> VirtioGpuResult;
+
+    /// If the resource is the scanout resource, flush it to the display.
+    fn flush_resource(&mut self, resource_id: u32, rect: Rectangle) -> VirtioGpuResult;
+
+    /// Copies data to host resource from the attached iovecs. Can also be used to flush caches.
+    fn transfer_write(
+        &mut self,
+        ctx_id: u32,
+        resource_id: u32,
+        transfer: Transfer3D,
+    ) -> VirtioGpuResult;
+
+    /// Copies data from the host resource to:
+    ///    1) To the optional volatile slice
+    ///    2) To the host resource's attached iovecs
+    ///
+    /// Can also be used to invalidate caches.
+    fn transfer_read<'a>(
+        &mut self,
+        ctx_id: u32,
+        resource_id: u32,
+        transfer: Transfer3D,
+        buf: Option<VolatileSlice<'a>>,
+    ) -> VirtioGpuResult;
+
+    /// Attaches backing memory to the given resource, represented by a `Vec` of `(address, size)`
+    /// tuples in the guest's physical address space. Converts to RutabagaIovec from the memory
+    /// mapping.
+    fn attach_backing(
+        &mut self,
+        resource_id: u32,
+        mem: &GuestMemoryMmap,
+        vecs: Vec<(GuestAddress, usize)>,
+    ) -> VirtioGpuResult;
+
+    /// Detaches any previously attached iovecs from the resource.
+    fn detach_backing(&mut self, resource_id: u32) -> VirtioGpuResult;
+
+    /// Updates the cursor's memory to the given resource_id, and sets its position to the given
+    /// coordinates.
+    fn update_cursor(
+        &mut self,
+        resource_id: u32,
+        cursor_pos: VhostUserGpuCursorPos,
+        hot_x: u32,
+        hot_y: u32,
+    ) -> VirtioGpuResult;
+
+    /// Moves the cursor's position to the given coordinates.
+    fn move_cursor(&mut self, resource_id: u32, cursor: VhostUserGpuCursorPos) -> VirtioGpuResult;
+
+    /// Returns a uuid for the resource.
+    fn resource_assign_uuid(&self, resource_id: u32) -> VirtioGpuResult;
+
+    /// Gets rutabaga's capset information associated with `index`.
+    fn get_capset_info(&self, index: u32) -> VirtioGpuResult;
+
+    /// Gets a capset from rutabaga.
+    fn get_capset(&self, capset_id: u32, version: u32) -> VirtioGpuResult;
+
+    /// Creates a rutabaga context.
+    fn create_context<'a>(
+        &mut self,
+        ctx_id: u32,
+        context_init: u32,
+        context_name: Option<&'a str>,
+    ) -> VirtioGpuResult;
+
+    /// Get an EventFd descriptor, that signals when to call event_poll.
+    fn get_event_poll_fd(&self) -> Option<EventFd>;
+
+    /// Polls the Rutabaga backend.
+    fn event_poll(&self);
+}
+
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub enum VirtioGpuRing {
+    Global,
+    ContextSpecific { ctx_id: u32, ring_idx: u8 },
+}
+
+struct FenceDescriptor {
+    ring: VirtioGpuRing,
+    fence_id: u64,
+    desc_index: u16,
+    len: u32,
+}
+
+#[derive(Default)]
+pub struct FenceState {
+    descs: Vec<FenceDescriptor>,
+    completed_fences: BTreeMap<VirtioGpuRing, u64>,
+}
+
+#[derive(Copy, Clone, Debug, Default)]
+struct AssociatedScanouts(u32);
+
+impl AssociatedScanouts {
+    fn enable(&mut self, scanout_id: u32) {
+        self.0 |= 1 << scanout_id;
+    }
+
+    fn disable(&mut self, scanout_id: u32) {
+        self.0 ^= 1 << scanout_id;
+    }
+
+    fn iter_enabled(self) -> impl Iterator<Item = u32> {
+        (0..VIRTIO_GPU_MAX_SCANOUTS)
+            .filter(move |i| ((self.0 >> i) & 1) == 1)
+            .map(|n| n as u32)
+    }
+}
+
+#[derive(Default, Copy, Clone)]
+pub struct VirtioGpuResource {
+    id: u32,
+    width: u32,
+    height: u32,
+    /// Stores information about which scanouts are associated with the given resource.
+    /// Resource could be used for multiple scanouts (the displays are mirrored).
+    scanouts: AssociatedScanouts,
+}
+
+impl VirtioGpuResource {
+    fn calculate_size(&self) -> Result<usize, String> {
+        let width = self.width as usize;
+        let height = self.height as usize;
+        let size = width
+            .checked_mul(height)
+            .ok_or("Multiplication of width and height overflowed")?
+            .checked_mul(READ_RESOURCE_BYTES_PER_PIXEL)
+            .ok_or("Multiplication of result and bytes_per_pixel overflowed")?;
+
+        Ok(size)
+    }
+}
+
+impl VirtioGpuResource {
+    /// Creates a new VirtioGpuResource with 2D/3D metadata
+    pub fn new(resource_id: u32, width: u32, height: u32) -> VirtioGpuResource {
+        VirtioGpuResource {
+            id: resource_id,
+            width,
+            height,
+            scanouts: Default::default(),
+        }
+    }
+}
+
+pub struct VirtioGpuScanout {
+    resource_id: u32,
+}
+
+pub struct RutabagaVirtioGpu {
+    pub(crate) rutabaga: Rutabaga,
+    pub(crate) gpu_backend: GpuBackend,
+    pub(crate) resources: BTreeMap<u32, VirtioGpuResource>,
+    pub(crate) fence_state: Arc<Mutex<FenceState>>,
+    pub(crate) scanouts: [Option<VirtioGpuScanout>; VIRTIO_GPU_MAX_SCANOUTS],
+}
+
+const READ_RESOURCE_BYTES_PER_PIXEL: usize = 4;
+
+impl RutabagaVirtioGpu {
+    // TODO: this depends on Rutabaga builder, so this will need to be handled at runtime eventually
+    pub const MAX_NUMBER_OF_CAPSETS: u32 = 3;
+
+    fn create_fence_handler(
+        queue_ctl: VringRwLock,
+        fence_state: Arc<Mutex<FenceState>>,
+    ) -> RutabagaFenceHandler {
+        RutabagaFenceHandler::new(move |completed_fence: RutabagaFence| {
+            debug!(
+                "XXX - fence called: id={}, ring_idx={}",
+                completed_fence.fence_id, completed_fence.ring_idx
+            );
+
+            let mut fence_state = fence_state.lock().unwrap();
+            let mut i = 0;
+
+            let ring = match completed_fence.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX {
+                0 => VirtioGpuRing::Global,
+                _ => VirtioGpuRing::ContextSpecific {
+                    ctx_id: completed_fence.ctx_id,
+                    ring_idx: completed_fence.ring_idx,
+                },
+            };
+
+            while i < fence_state.descs.len() {
+                debug!("XXX - fence_id: {}", fence_state.descs[i].fence_id);
+                if fence_state.descs[i].ring == ring
+                    && fence_state.descs[i].fence_id <= completed_fence.fence_id
+                {
+                    let completed_desc = fence_state.descs.remove(i);
+                    debug!(
+                        "XXX - found fence: desc_index={}",
+                        completed_desc.desc_index
+                    );
+
+                    queue_ctl
+                        .add_used(completed_desc.desc_index, completed_desc.len)
+                        .unwrap();
+
+                    queue_ctl
+                        .signal_used_queue()
.map_err(Error::NotificationFailed) + .unwrap(); + debug!("Notification sent"); + } else { + i += 1; + } + } + // Update the last completed fence for this context + fence_state + .completed_fences + .insert(ring, completed_fence.fence_id); + }) + } + + pub fn new(queue_ctl: &VringRwLock, renderer: GpuMode, gpu_backend: GpuBackend) -> Self { + let component = match renderer { + GpuMode::ModeVirglRenderer => RutabagaComponentType::VirglRenderer, + GpuMode::ModeGfxstream => RutabagaComponentType::Gfxstream, + }; + let builder = RutabagaBuilder::new(component, 0) + .set_use_egl(true) + .set_use_gles(true) + .set_use_glx(true) + .set_use_surfaceless(true) + .set_use_external_blob(true); + + let fence_state = Arc::new(Mutex::new(Default::default())); + let fence = Self::create_fence_handler(queue_ctl.clone(), fence_state.clone()); + let rutabaga = builder + .build(fence, None) + .expect("Rutabaga initialization failed!"); + + Self { + rutabaga, + gpu_backend, + resources: Default::default(), + fence_state, + scanouts: Default::default(), + } + } + + fn result_from_query(&mut self, resource_id: u32) -> GpuResponse { + let Ok(query) = self.rutabaga.query(resource_id) else { + return OkNoData; + }; + let mut plane_info = Vec::with_capacity(4); + for plane_index in 0..4 { + plane_info.push(GpuResponsePlaneInfo { + stride: query.strides[plane_index], + offset: query.offsets[plane_index], + }); + } + let format_modifier = query.modifier; + OkResourcePlaneInfo { + format_modifier, + plane_info, + } + } + + fn read_2d_resource( + &mut self, + resource: VirtioGpuResource, + output: &mut [u8], + ) -> Result<(), String> { + let minimal_buffer_size = resource.calculate_size()?; + assert!(output.len() >= minimal_buffer_size); + + let transfer = Transfer3D { + x: 0, + y: 0, + z: 0, + w: resource.width, + h: resource.height, + d: 1, + level: 0, + stride: resource.width * READ_RESOURCE_BYTES_PER_PIXEL as u32, + layer_stride: 0, + offset: 0, + }; + + // ctx_id 0 seems to be special, 
crosvm uses it for this purpose too
+        self.rutabaga
+            .transfer_read(0, resource.id, transfer, Some(IoSliceMut::new(output)))
+            .map_err(|e| format!("{e}"))?;
+
+        Ok(())
+    }
+}
+
+impl VirtioGpu for RutabagaVirtioGpu {
+    fn force_ctx_0(&self) {
+        self.rutabaga.force_ctx_0()
+    }
+
+    fn display_info(&self) -> VirtioGpuResult {
+        let backend_display_info = self.gpu_backend.get_display_info().map_err(|e| {
+            error!("Failed to get display info: {e:?}");
+            ErrUnspec
+        })?;
+
+        let display_info = backend_display_info
+            .pmodes
+            .iter()
+            .map(|display| (display.r.width, display.r.height, display.enabled == 1))
+            .collect::<Vec<_>>();
+
+        debug!("Displays: {:?}", display_info);
+        Ok(OkDisplayInfo(display_info))
+    }
+
+    fn get_edid(&self, edid_req: VhostUserGpuEdidRequest) -> VirtioGpuResult {
+        debug!("edid request: {edid_req:?}");
+        let edid = self.gpu_backend.get_edid(&edid_req).map_err(|e| {
+            error!("Failed to get edid from frontend: {}", e);
+            ErrUnspec
+        })?;
+
+        Ok(OkEdid {
+            blob: Box::from(&edid.edid[..edid.size as usize]),
+        })
+    }
+
+    fn set_scanout(
+        &mut self,
+        scanout_id: u32,
+        resource_id: u32,
+        rect: Rectangle,
+    ) -> VirtioGpuResult {
+        let scanout = self
+            .scanouts
+            .get_mut(scanout_id as usize)
+            .ok_or(ErrInvalidScanoutId)?;
+
+        // If a resource is already associated with this scanout, make sure to disable this scanout for that resource
+        if let Some(resource_id) = scanout.as_ref().map(|scanout| scanout.resource_id) {
+            let resource = self
+                .resources
+                .get_mut(&resource_id)
+                .ok_or(ErrInvalidResourceId)?;
+
+            resource.scanouts.disable(scanout_id);
+        }
+
+        // Virtio spec: "The driver can use resource_id = 0 to disable a scanout."
+ if resource_id == 0 { + *scanout = None; + debug!("Disabling scanout scanout_id={scanout_id}"); + self.gpu_backend + .set_scanout(&VhostUserGpuScanout { + scanout_id, + width: 0, + height: 0, + }) + .map_err(|e| { + error!("Failed to set_scanout: {e:?}"); + ErrUnspec + })?; + return Ok(OkNoData); + } + + debug!("Enabling scanout scanout_id={scanout_id}, resource_id={resource_id}: {rect:?}"); + + // QEMU doesn't like (it lags) when we call set_scanout while the scanout is enabled + if scanout.is_none() { + self.gpu_backend + .set_scanout(&VhostUserGpuScanout { + scanout_id, + width: rect.width, + height: rect.height, + }) + .map_err(|e| { + error!("Failed to set_scanout: {e:?}"); + ErrUnspec + })?; + } + + let resource = self + .resources + .get_mut(&resource_id) + .ok_or(ErrInvalidResourceId)?; + + resource.scanouts.enable(scanout_id); + *scanout = Some(VirtioGpuScanout { resource_id }); + Ok(OkNoData) + } + + fn resource_create_3d( + &mut self, + resource_id: u32, + resource_create_3d: ResourceCreate3D, + ) -> VirtioGpuResult { + self.rutabaga + .resource_create_3d(resource_id, resource_create_3d)?; + + let resource = VirtioGpuResource::new( + resource_id, + resource_create_3d.width, + resource_create_3d.height, + ); + + debug_assert!( + !self.resources.contains_key(&resource_id), + "Resource ID {} already exists in the resources map.", + resource_id + ); + + // Rely on rutabaga to check for duplicate resource ids. + self.resources.insert(resource_id, resource); + Ok(self.result_from_query(resource_id)) + } + + fn unref_resource(&mut self, resource_id: u32) -> VirtioGpuResult { + self.rutabaga.unref_resource(resource_id)?; + Ok(OkNoData) + } + + /// If the resource is the scanout resource, flush it to the display. 
+ fn flush_resource(&mut self, resource_id: u32, _rect: Rectangle) -> VirtioGpuResult { + if resource_id == 0 { + return Ok(OkNoData); + } + + let resource = *self + .resources + .get(&resource_id) + .ok_or(ErrInvalidResourceId)?; + + for scanout_id in resource.scanouts.iter_enabled() { + let resource_size = resource.calculate_size().map_err(|e| { + error!( + "Resource {id} size calculation failed: {e}", + id = resource.id + ); + ErrUnspec + })?; + + let mut data = vec![0; resource_size]; + + // Gfxstream doesn't support transfer_read for portion of the resource. So we always + // read the whole resource, even if the guest specified to flush only a portion of it. + // + // The function stream_renderer_transfer_read_iov seems to ignore the stride and + // transfer_box parameters and expects the provided buffer to fit the whole resource. + if let Err(e) = self.read_2d_resource(resource, &mut data) { + log::error!("Failed to read resource {resource_id} for scanout {scanout_id}: {e}"); + continue; + } + + self.gpu_backend + .update_scanout( + &VhostUserGpuUpdate { + scanout_id, + x: 0, + y: 0, + width: resource.width, + height: resource.height, + }, + &data, + ) + .map_err(|e| { + error!("Failed to update_scanout: {e:?}"); + ErrUnspec + })? 
+ } + + Ok(OkNoData) + } + + fn transfer_write( + &mut self, + ctx_id: u32, + resource_id: u32, + transfer: Transfer3D, + ) -> VirtioGpuResult { + trace!("transfer_write ctx_id {ctx_id}, resource_id {resource_id}, {transfer:?}"); + + self.rutabaga + .transfer_write(ctx_id, resource_id, transfer)?; + Ok(OkNoData) + } + + fn transfer_read( + &mut self, + ctx_id: u32, + resource_id: u32, + transfer: Transfer3D, + buf: Option, + ) -> VirtioGpuResult { + let buf = buf.map(|vs| { + IoSliceMut::new( + // SAFETY: trivially safe + unsafe { std::slice::from_raw_parts_mut(vs.ptr_guard_mut().as_ptr(), vs.len()) }, + ) + }); + self.rutabaga + .transfer_read(ctx_id, resource_id, transfer, buf)?; + Ok(OkNoData) + } + + fn attach_backing( + &mut self, + resource_id: u32, + mem: &GuestMemoryMmap, + vecs: Vec<(GuestAddress, usize)>, + ) -> VirtioGpuResult { + let rutabaga_iovecs = sglist_to_rutabaga_iovecs(&vecs[..], mem).map_err(|_| ErrUnspec)?; + self.rutabaga.attach_backing(resource_id, rutabaga_iovecs)?; + Ok(OkNoData) + } + + fn detach_backing(&mut self, resource_id: u32) -> VirtioGpuResult { + self.rutabaga.detach_backing(resource_id)?; + Ok(OkNoData) + } + + fn update_cursor( + &mut self, + resource_id: u32, + cursor_pos: VhostUserGpuCursorPos, + hot_x: u32, + hot_y: u32, + ) -> VirtioGpuResult { + const CURSOR_WIDTH: u32 = 64; + const CURSOR_HEIGHT: u32 = 64; + + let mut data = Box::new( + [0; READ_RESOURCE_BYTES_PER_PIXEL * CURSOR_WIDTH as usize * CURSOR_HEIGHT as usize], + ); + + let cursor_resource = self + .resources + .get(&resource_id) + .ok_or(ErrInvalidResourceId)?; + + if cursor_resource.width != CURSOR_WIDTH || cursor_resource.height != CURSOR_HEIGHT { + error!("Cursor resource has invalid dimensions"); + return Err(ErrInvalidParameter); + } + + self.read_2d_resource(*cursor_resource, &mut data[..]) + .map_err(|e| { + error!("Failed to read resource of cursor: {e}"); + ErrUnspec + })?; + + let cursor_update = VhostUserGpuCursorUpdate { + pos: cursor_pos, + hot_x, + 
hot_y, + }; + + self.gpu_backend + .cursor_update(&cursor_update, &data) + .map_err(|e| { + error!("Failed to update cursor pos from frontend: {}", e); + ErrUnspec + })?; + + Ok(OkNoData) + } + + fn move_cursor(&mut self, resource_id: u32, cursor: VhostUserGpuCursorPos) -> VirtioGpuResult { + if resource_id == 0 { + self.gpu_backend.cursor_pos_hide(&cursor).map_err(|e| { + error!("Failed to set cursor pos from frontend: {}", e); + ErrUnspec + })?; + } else { + self.gpu_backend.cursor_pos(&cursor).map_err(|e| { + error!("Failed to set cursor pos from frontend: {}", e); + ErrUnspec + })?; + } + + Ok(OkNoData) + } + + fn resource_assign_uuid(&self, resource_id: u32) -> VirtioGpuResult { + if !self.resources.contains_key(&resource_id) { + return Err(ErrInvalidResourceId); + } + + // TODO(stevensd): use real uuids once the virtio wayland protocol is updated to + // handle more than 32 bits. For now, the virtwl driver knows that the uuid is + // actually just the resource id. + let mut uuid: [u8; 16] = [0; 16]; + for (idx, byte) in resource_id.to_be_bytes().iter().enumerate() { + uuid[12 + idx] = *byte; + } + Ok(OkResourceUuid { uuid }) + } + + fn get_capset_info(&self, index: u32) -> VirtioGpuResult { + let (capset_id, version, size) = self.rutabaga.get_capset_info(index)?; + Ok(OkCapsetInfo { + capset_id, + version, + size, + }) + } + + fn get_capset(&self, capset_id: u32, version: u32) -> VirtioGpuResult { + let capset = self.rutabaga.get_capset(capset_id, version)?; + Ok(OkCapset(capset)) + } + + fn create_context( + &mut self, + ctx_id: u32, + context_init: u32, + context_name: Option<&str>, + ) -> VirtioGpuResult { + self.rutabaga + .create_context(ctx_id, context_init, context_name)?; + Ok(OkNoData) + } + + fn destroy_context(&mut self, ctx_id: u32) -> VirtioGpuResult { + self.rutabaga.destroy_context(ctx_id)?; + Ok(OkNoData) + } + + fn context_attach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult { + 
self.rutabaga.context_attach_resource(ctx_id, resource_id)?; + Ok(OkNoData) + } + + fn context_detach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult { + self.rutabaga.context_detach_resource(ctx_id, resource_id)?; + Ok(OkNoData) + } + + fn submit_command( + &mut self, + ctx_id: u32, + commands: &mut [u8], + fence_ids: &[u64], + ) -> VirtioGpuResult { + self.rutabaga.submit_command(ctx_id, commands, fence_ids)?; + Ok(OkNoData) + } + + fn create_fence(&mut self, rutabaga_fence: RutabagaFence) -> VirtioGpuResult { + self.rutabaga.create_fence(rutabaga_fence)?; + Ok(OkNoData) + } + + fn process_fence( + &mut self, + ring: VirtioGpuRing, + fence_id: u64, + desc_index: u16, + len: u32, + ) -> bool { + // In case the fence is signaled immediately after creation, don't add a return + // FenceDescriptor. + let mut fence_state = self.fence_state.lock().unwrap(); + if fence_id > *fence_state.completed_fences.get(&ring).unwrap_or(&0) { + fence_state.descs.push(FenceDescriptor { + ring, + fence_id, + desc_index, + len, + }); + + false + } else { + true + } + } + + fn resource_create_blob( + &mut self, + _ctx_id: u32, + _resource_id: u32, + _resource_create_blob: ResourceCreateBlob, + _vecs: Vec<(GuestAddress, usize)>, + _mem: &GuestMemoryMmap, + ) -> VirtioGpuResult { + error!("Not implemented: resource_create_blob"); + Err(ErrUnspec) + } + + fn resource_map_blob(&mut self, _resource_id: u32, _offset: u64) -> VirtioGpuResult { + error!("Not implemented: resource_map_blob"); + Err(ErrUnspec) + } + + fn resource_unmap_blob(&mut self, _resource_id: u32) -> VirtioGpuResult { + error!("Not implemented: resource_unmap_blob"); + Err(ErrUnspec) + } + + fn get_event_poll_fd(&self) -> Option { + self.rutabaga.poll_descriptor().map(|fd| { + // SAFETY: Safe, the fd should be valid, because Rutabaga guarantees it. + // into_raw_descriptor() returns a RawFd and makes sure SafeDescriptor::drop doesn't run. 
+ unsafe { EventFd::from_raw_fd(fd.into_raw_descriptor()) } + }) + } + + fn event_poll(&self) { + self.rutabaga.event_poll() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::os::unix::net::UnixStream; + use std::sync::{Arc, Mutex}; + + use assert_matches::assert_matches; + use rusty_fork::rusty_fork_test; + use rutabaga_gfx::{RutabagaBuilder, RutabagaComponentType, RutabagaHandler}; + + fn dummy_gpu_backend() -> GpuBackend { + let (_, backend) = UnixStream::pair().unwrap(); + GpuBackend::from_stream(backend) + } + + fn new_gpu() -> RutabagaVirtioGpu { + let rutabaga = RutabagaBuilder::new(RutabagaComponentType::VirglRenderer, 0) + .set_use_egl(true) + .set_use_gles(true) + .set_use_glx(true) + .set_use_egl(true) + .set_use_surfaceless(true) + .build(RutabagaHandler::new(|_| {}), None) + .unwrap(); + RutabagaVirtioGpu { + rutabaga, + gpu_backend: dummy_gpu_backend(), + resources: Default::default(), + fence_state: Arc::new(Mutex::new(Default::default())), + scanouts: Default::default(), + } + } + + rusty_fork_test! { + #[test] + fn test_gpu_capset() { + let virtio_gpu = new_gpu(); + + let capset_info = virtio_gpu.get_capset_info(0); + assert_matches!(capset_info, Ok(OkCapsetInfo { .. })); + + let Ok(OkCapsetInfo {capset_id, version, ..}) = capset_info else { + unreachable!("Response should have been checked by assert") + }; + + let capset_info = virtio_gpu.get_capset(capset_id, version); + assert_matches!(capset_info, Ok(OkCapset(_))); + } + + #[test] + fn test_gpu_submit_command_fails() { + let mut virtio_gpu = new_gpu(); + let mut cmd_buf = [0; 10]; + let fence_ids: Vec = Vec::with_capacity(0); + virtio_gpu + .submit_command(1, &mut cmd_buf[..], &fence_ids) + .unwrap_err(); + } + } +}