diff --git a/staging/Cargo.lock b/staging/Cargo.lock index 1c7b571b..8f614a29 100644 --- a/staging/Cargo.lock +++ b/staging/Cargo.lock @@ -96,6 +96,12 @@ version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + [[package]] name = "cfg-if" version = "1.0.0" @@ -169,6 +175,19 @@ dependencies = [ "regex", ] +[[package]] +name = "env_logger" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580" +dependencies = [ + "humantime", + "is-terminal", + "log", + "regex", + "termcolor", +] + [[package]] name = "env_logger" version = "0.11.5" @@ -310,6 +329,17 @@ dependencies = [ "slab", ] +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + [[package]] name = "glob" version = "0.3.1" @@ -350,6 +380,17 @@ dependencies = [ "hashbrown", ] +[[package]] +name = "is-terminal" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys", +] + [[package]] name = "is_terminal_polyfill" version = "1.70.1" @@ -380,6 +421,15 @@ version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] + [[package]] name = "nix" version = "0.27.1" @@ -389,6 +439,7 @@ dependencies = [ "bitflags 2.6.0", "cfg-if", "libc", + "memoffset", ] [[package]] @@ -434,6 +485,18 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkg-config" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + [[package]] name = "proc-macro-crate" version = "3.1.0" @@ -461,6 +524,36 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + [[package]] name = "regex" version = "1.10.5" @@ -496,6 +589,17 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" +[[package]] +name = "remain" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46aef80f842736de545ada6ec65b81ee91504efd6853f4b96de7414c42ae7443" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "rstest" version = "0.21.0" @@ -548,6 +652,22 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "rutabaga_gfx" +version = "0.1.3" +dependencies = [ + "anyhow", + "cfg-if", + "libc", + "log", + "nix", + "pkg-config", + "remain", + "thiserror", + "winapi", + "zerocopy 0.7.34", +] + [[package]] name = "semver" version = "1.0.23" @@ -592,6 +712,15 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + [[package]] name = "thiserror" version = "1.0.63" @@ -641,6 +770,28 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +[[package]] +name = "uuid" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +dependencies = [ + "getrandom", + "rand", + "uuid-macro-internal", +] + +[[package]] +name = "uuid-macro-internal" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee1cd046f83ea2c4e920d6ee9f7c3537ef928d75dce5d84a87c2c5d6b3999a3a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "v4l2r" version = "0.0.1" @@ -666,6 +817,38 @@ dependencies = [ "vmm-sys-util", ] +[[package]] +name = "vhost" +version = "0.11.0" +source = "git+https://github.com/mtjhrc/vhost.git?branch=gpu-socket-final#b3611667743d7b789edb5685ac712482458353e8" +dependencies = [ + "bitflags 2.6.0", + "libc", + "uuid", + "vm-memory", + "vmm-sys-util", +] + +[[package]] +name = "vhost-device-gpu" +version = "0.1.0" +dependencies = [ + "assert_matches", + "clap", + "env_logger 0.10.2", + "libc", + "log", + "rutabaga_gfx", + "thiserror", + "vhost 0.11.0 (git+https://github.com/mtjhrc/vhost.git?branch=gpu-socket-final)", + "vhost-user-backend 0.15.0 (git+https://github.com/mtjhrc/vhost.git?branch=gpu-socket-final)", + "virtio-bindings", + "virtio-queue", + "vm-memory", + "vmm-sys-util", + "zerocopy 0.6.6", +] + [[package]] name = "vhost-device-video" version = "0.1.0" @@ -673,7 +856,7 @@ dependencies = [ "assert_matches", "bitflags 2.6.0", "clap", - "env_logger", + "env_logger 0.11.5", "epoll", "futures-executor", "libc", @@ -683,8 +866,8 @@ dependencies = [ "tempfile", "thiserror", "v4l2r", - "vhost", - "vhost-user-backend", + "vhost 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "vhost-user-backend 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)", "virtio-bindings", "virtio-queue", "vm-memory", @@ -699,7 +882,21 @@ checksum = "1f0ffb1dd8e00a708a0e2c32d5efec5812953819888591fff9ff68236b8a5096" dependencies = [ "libc", "log", - 
"vhost", + "vhost 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "virtio-bindings", + "virtio-queue", + "vm-memory", + "vmm-sys-util", +] + +[[package]] +name = "vhost-user-backend" +version = "0.15.0" +source = "git+https://github.com/mtjhrc/vhost.git?branch=gpu-socket-final#547e12ea21d67fe1127255ff5064f9117810b304" +dependencies = [ + "libc", + "log", + "vhost 0.11.0 (git+https://github.com/mtjhrc/vhost.git?branch=gpu-socket-final)", "virtio-bindings", "virtio-queue", "vm-memory", @@ -748,6 +945,12 @@ dependencies = [ "libc", ] +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + [[package]] name = "winapi" version = "0.3.9" @@ -764,6 +967,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" +dependencies = [ + "windows-sys", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" @@ -851,3 +1063,45 @@ checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" dependencies = [ "memchr", ] + +[[package]] +name = "zerocopy" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "854e949ac82d619ee9a14c66a1b674ac730422372ccb759ce0c39cabcf2bf8e6" +dependencies = [ + "byteorder", + "zerocopy-derive 0.6.6", +] + +[[package]] +name = "zerocopy" +version = "0.7.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" +dependencies = [ + "byteorder", + "zerocopy-derive 0.7.34", +] + +[[package]] +name = "zerocopy-derive" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "125139de3f6b9d625c39e2efdd73d41bdac468ccd556556440e322be0e1bbd91" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/staging/Cargo.toml b/staging/Cargo.toml index be199ad1..fc3b8af3 100644 --- a/staging/Cargo.toml +++ b/staging/Cargo.toml @@ -1,5 +1,6 @@ [workspace] resolver = "2" members = [ + "vhost-device-gpu", "vhost-device-video", ] diff --git a/staging/vhost-device-gpu/CHANGELOG.md b/staging/vhost-device-gpu/CHANGELOG.md new file mode 100644 index 00000000..7dc867d9 --- /dev/null +++ b/staging/vhost-device-gpu/CHANGELOG.md @@ -0,0 +1,14 @@ +# Changelog +## [Unreleased] + +### Added + +### Changed + +### Fixed + +### Deprecated + +## [0.1.0] + +First release \ No newline at end of file diff --git a/staging/vhost-device-gpu/Cargo.toml b/staging/vhost-device-gpu/Cargo.toml new file mode 100644 index 00000000..c69ddda5 --- /dev/null +++ b/staging/vhost-device-gpu/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "vhost-device-gpu" +version = "0.1.0" +authors = ["Dorinda Bassey ", "Matej Hrica "] +description = "A virtio-gpu device using the vhost-user protocol." 
+repository = "https://github.com/rust-vmm/vhost-device" +readme = "README.md" +keywords = ["gpu", "vhost", "virt", "backend"] +license = "Apache-2.0 OR BSD-3-Clause" +edition = "2021" +publish = false + +[features] +xen = ["vm-memory/xen", "vhost/xen", "vhost-user-backend/xen"] + +[dependencies] +clap = { version = "4.4", features = ["derive"] } +env_logger = "0.10" +libc = "0.2" +log = "0.4" +rutabaga_gfx = { path = "rutabaga_gfx", features = ["gfxstream", "virgl_renderer"] } +thiserror = "1.0" +vhost = { git = "https://github.com/mtjhrc/vhost.git", package = "vhost", branch = "gpu-socket-final", features = ["vhost-user-backend"] } +vhost-user-backend = { git = "https://github.com/mtjhrc/vhost.git", package = "vhost-user-backend", branch = "gpu-socket-final", features = ["gpu-socket"] } +virtio-bindings = "0.2.2" +virtio-queue = "0.12.0" +vm-memory = "0.14.0" +vmm-sys-util = "0.12.1" +zerocopy = "0.6.3" + +[dev-dependencies] +assert_matches = "1.5" +virtio-queue = { version = "0.12", features = ["test-utils"] } +vm-memory = { version = "0.14.0", features = ["backend-mmap", "backend-atomic"] } diff --git a/staging/vhost-device-gpu/LICENSE-APACHE b/staging/vhost-device-gpu/LICENSE-APACHE new file mode 100644 index 00000000..1cd601d0 --- /dev/null +++ b/staging/vhost-device-gpu/LICENSE-APACHE @@ -0,0 +1 @@ +../../LICENSE-APACHE \ No newline at end of file diff --git a/staging/vhost-device-gpu/LICENSE-BSD-3-Clause b/staging/vhost-device-gpu/LICENSE-BSD-3-Clause new file mode 100644 index 00000000..a60f1af6 --- /dev/null +++ b/staging/vhost-device-gpu/LICENSE-BSD-3-Clause @@ -0,0 +1 @@ +../../LICENSE-BSD-3-Clause \ No newline at end of file diff --git a/staging/vhost-device-gpu/README.md b/staging/vhost-device-gpu/README.md new file mode 100644 index 00000000..0357de2c --- /dev/null +++ b/staging/vhost-device-gpu/README.md @@ -0,0 +1,67 @@ +# vhost-device-gpu - GPU emulation backend daemon + +## Synopsis +```shell + vhost-device-gpu --socket-path +``` + +## Description + A virtio-gpu device using the vhost-user protocol. + +## Options + +```text + -s, --socket-path + vhost-user Unix domain socket path + + -h, --help + Print help + + -V, --version + Print version +``` + +## Examples + +First start the daemon on the host machine: + +```shell +host# vhost-device-gpu --socket-path /tmp/gpu.socket +``` + +With QEMU, there are two device frontends you can use with this device. +You can either use `vhost-user-gpu-pci` or `vhost-user-vga`, which also +implements VGA, that allows you to see boot messages before the guest +initializes the GPU. You can also use different display outputs (for example +`gtk` or `dbus`). +By default, QEMU also adds another VGA output, use `-vga none` to make +sure it is disabled. 
+ +1) Using `vhost-user-gpu-pci`. Start QEMU with the following flags: + +```text +-chardev socket,id=vgpu,path=/tmp/gpu.socket \ +-device vhost-user-gpu-pci,chardev=vgpu,id=vgpu \ +-object memory-backend-memfd,share=on,id=mem0,size=4G \ +-machine q35,memory-backend=mem0,accel=kvm \ +-display gtk,gl=on,show-cursor=on \ +-vga none +``` + +2) Using `vhost-user-vga`. Start QEMU with the following flags: + +```text +-chardev socket,id=vgpu,path=/tmp/gpu.socket \ +-device vhost-user-vga,chardev=vgpu,id=vgpu \ +-object memory-backend-memfd,share=on,id=mem0,size=4G \ +-machine q35,memory-backend=mem0,accel=kvm \ +-display gtk,gl=on,show-cursor=on \ +-vga none +``` + +## License + +This project is licensed under either of + +- [Apache License](http://www.apache.org/licenses/LICENSE-2.0), Version 2.0 +- [BSD-3-Clause License](https://opensource.org/licenses/BSD-3-Clause) diff --git a/staging/vhost-device-gpu/rutabaga_gfx/build.rs b/staging/vhost-device-gpu/rutabaga_gfx/build.rs index 6f3e0dfd..f1d76138 100644 --- a/staging/vhost-device-gpu/rutabaga_gfx/build.rs +++ b/staging/vhost-device-gpu/rutabaga_gfx/build.rs @@ -13,6 +13,11 @@ fn main() -> Result<(), pkg_config::Error> { pkg_config::Config::new().probe("epoxy")?; pkg_config::Config::new().probe("libdrm")?; pkg_config::Config::new().probe("virglrenderer")?; + pkg_config::Config::new().probe("gfxstream_backend")?; + pkg_config::Config::new().probe("aemu_base")?; + pkg_config::Config::new().probe("aemu_host_common")?; + pkg_config::Config::new().probe("aemu_logging")?; + pkg_config::Config::new().probe("aemu_snapshot")?; Ok(()) } diff --git a/staging/vhost-device-gpu/src/device.rs b/staging/vhost-device-gpu/src/device.rs new file mode 100644 index 00000000..d365987a --- /dev/null +++ b/staging/vhost-device-gpu/src/device.rs @@ -0,0 +1,1197 @@ +// vhost device Gpu +// +// Copyright 2024 Red Hat +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +use log::{debug, error, trace, warn}; +use std::{ + cell::RefCell, + io::{self, Result as IoResult}, + os::fd::AsRawFd, + sync::{self, Arc, Mutex}, +}; + +use rutabaga_gfx::{ + ResourceCreate3D, RutabagaFence, Transfer3D, RUTABAGA_PIPE_BIND_RENDER_TARGET, + RUTABAGA_PIPE_TEXTURE_2D, +}; +use thiserror::Error as ThisError; +use vhost::vhost_user::{ + gpu_message::{VhostUserGpuCursorPos, VhostUserGpuEdidRequest}, + message::{VhostUserProtocolFeatures, VhostUserVirtioFeatures}, + GpuBackend, +}; +use vhost_user_backend::{VhostUserBackend, VringEpollHandler, VringRwLock, VringT}; +use virtio_bindings::{ + bindings::{ + virtio_config::{VIRTIO_F_NOTIFY_ON_EMPTY, VIRTIO_F_RING_RESET, VIRTIO_F_VERSION_1}, + virtio_ring::{VIRTIO_RING_F_EVENT_IDX, VIRTIO_RING_F_INDIRECT_DESC}, + }, + virtio_gpu::{ + VIRTIO_GPU_F_CONTEXT_INIT, VIRTIO_GPU_F_EDID, VIRTIO_GPU_F_RESOURCE_BLOB, + VIRTIO_GPU_F_VIRGL, + }, +}; +use virtio_queue::{QueueOwnedT, Reader, Writer}; +use vm_memory::{ByteValued, GuestAddressSpace, GuestMemoryAtomic, GuestMemoryMmap, Le32}; +use vmm_sys_util::{ + epoll::EventSet, + eventfd::{EventFd, EFD_NONBLOCK}, +}; + +use crate::{ + protocol::{ + virtio_gpu_box, virtio_gpu_ctrl_hdr, virtio_gpu_ctx_create, virtio_gpu_ctx_resource, + virtio_gpu_cursor_pos, virtio_gpu_get_capset, virtio_gpu_get_capset_info, + virtio_gpu_get_edid, virtio_gpu_rect, virtio_gpu_resource_assign_uuid, + virtio_gpu_resource_attach_backing, virtio_gpu_resource_create_2d, + virtio_gpu_resource_create_3d, virtio_gpu_resource_detach_backing, + virtio_gpu_resource_flush, virtio_gpu_resource_unref, virtio_gpu_set_scanout,
virtio_gpu_transfer_host_3d, virtio_gpu_transfer_to_host_2d, virtio_gpu_update_cursor, + GpuCommand, GpuCommandDecodeError, + GpuResponse::{self, ErrUnspec}, + GpuResponseEncodeError, VirtioGpuConfig, VirtioGpuResult, CONTROL_QUEUE, CURSOR_QUEUE, + NUM_QUEUES, POLL_EVENT, QUEUE_SIZE, VIRTIO_GPU_FLAG_FENCE, VIRTIO_GPU_FLAG_INFO_RING_IDX, + VIRTIO_GPU_MAX_SCANOUTS, + }, + virtio_gpu::{RutabagaVirtioGpu, VirtioGpu, VirtioGpuRing}, + GpuConfig, GpuMode, +}; + +type Result<T> = std::result::Result<T, Error>; + +#[derive(Debug, ThisError)] +pub enum Error { + #[error("Failed to handle event, didn't match EPOLLIN")] + HandleEventNotEpollIn, + #[error("Failed to handle unknown event")] + HandleEventUnknown, + #[error("Descriptor read failed")] + DescriptorReadFailed, + #[error("Descriptor write failed")] + DescriptorWriteFailed, + #[error("Invalid command type {0}")] + InvalidCommandType(u32), + #[error("Failed to send used queue notification: {0}")] + NotificationFailed(io::Error), + #[error("Failed to create new EventFd")] + EventFdFailed, + #[error("Failed to create an iterator over a descriptor chain: {0}")] + CreateIteratorDescChain(virtio_queue::Error), + #[error("Failed to create descriptor chain Reader: {0}")] + CreateReader(virtio_queue::Error), + #[error("Failed to create descriptor chain Writer: {0}")] + CreateWriter(virtio_queue::Error), + #[error("Failed to decode gpu command: {0}")] + GpuCommandDecode(GpuCommandDecodeError), + #[error("Failed to encode gpu response: {0}")] + GpuResponseEncode(GpuResponseEncodeError), + #[error("Failed to add used chain to queue: {0}")] + QueueAddUsed(virtio_queue::Error), + #[error("Epoll handler not available: {0}")] + EpollHandler(String), + #[error("Failed to register epoll listener: {0}")] + RegisterEpollListener(io::Error), +} + +impl From<Error> for io::Error { + fn from(e: Error) -> Self { + io::Error::new(io::ErrorKind::Other, e) + } +} + +struct VhostUserGpuBackendInner { + virtio_cfg: VirtioGpuConfig, + event_idx: bool, + gpu_backend: Option<GpuBackend>, + pub exit_event: EventFd, + mem: Option<GuestMemoryAtomic<GuestMemoryMmap>>, + renderer: GpuMode, +} + +pub struct VhostUserGpuBackend { + inner: Mutex<VhostUserGpuBackendInner>, + // this uses sync::Weak to avoid a reference cycle + epoll_handler: Mutex<sync::Weak<VringEpollHandler<Arc<VhostUserGpuBackend>>>>, + poll_event_fd: Mutex<Option<EventFd>>, +} + +impl VhostUserGpuBackend { + pub fn new(gpu_config: GpuConfig) -> Result<Arc<Self>> { + log::trace!("VhostUserGpuBackend::new(config = {:?})", &gpu_config); + let inner = VhostUserGpuBackendInner { + virtio_cfg: VirtioGpuConfig { + events_read: 0.into(), + events_clear: 0.into(), + num_scanouts: Le32::from(VIRTIO_GPU_MAX_SCANOUTS as u32), + num_capsets: RutabagaVirtioGpu::MAX_NUMBER_OF_CAPSETS.into(), + }, + event_idx: false, + gpu_backend: None, + exit_event: EventFd::new(EFD_NONBLOCK).map_err(|_| Error::EventFdFailed)?, + mem: None, + renderer: gpu_config.get_renderer(), + }; + + Ok(Arc::new(Self { + inner: Mutex::new(inner), + epoll_handler: Mutex::new(sync::Weak::new()), + poll_event_fd: Mutex::new(None), + })) + } + + pub fn set_epoll_handler(&self, epoll_handlers: &[Arc<VringEpollHandler<Arc<Self>>>]) { + // We only expect 1 thread to which we want to register all handlers + assert_eq!(epoll_handlers.len(), 1); + let mut handler = match self.epoll_handler.lock() { + Ok(h) => h, + Err(poisoned) => poisoned.into_inner(), + }; + *handler = Arc::downgrade(&epoll_handlers[0]); + } +} + +impl VhostUserGpuBackendInner { + fn process_gpu_command( + &mut self, + virtio_gpu: &mut RutabagaVirtioGpu, + mem: &GuestMemoryMmap, + hdr: virtio_gpu_ctrl_hdr, + cmd: GpuCommand, + ) -> VirtioGpuResult { + virtio_gpu.force_ctx_0();
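A note on the `sync::Weak` field in `VhostUserGpuBackend` above: the `VringEpollHandler` is owned by the daemon and itself keeps an `Arc` of this backend, so storing a strong `Arc` back to the handler would create a reference cycle and neither object would ever be freed. A minimal, self-contained sketch of the pattern (the type names here are hypothetical stand-ins, not the real vhost-user-backend types):

```rust
use std::sync::{Arc, Mutex, Weak};

struct Handler; // stand-in for VringEpollHandler

struct Backend {
    // Weak, not Arc: the handler side already owns an Arc<Backend>,
    // so a strong reference here would form a reference cycle.
    handler: Mutex<Weak<Handler>>,
}

fn main() {
    let backend = Arc::new(Backend {
        handler: Mutex::new(Weak::new()),
    });
    let handler = Arc::new(Handler);
    *backend.handler.lock().unwrap() = Arc::downgrade(&handler);

    // upgrade() succeeds only while the daemon keeps the handler alive,
    // which is what handle_event() relies on when registering POLL_EVENT.
    assert!(backend.handler.lock().unwrap().upgrade().is_some());
    drop(handler);
    assert!(backend.handler.lock().unwrap().upgrade().is_none());
}
```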
debug!("process_gpu_command: {cmd:?}"); + match cmd { + GpuCommand::GetDisplayInfo => { + if let Some(gpu_backend) = self.gpu_backend.as_mut() { + match gpu_backend.get_display_info() { + Ok(display_info) => { + let virtio_display = virtio_gpu.display_info(display_info); + debug!("Displays: {:?}", virtio_display); + Ok(GpuResponse::OkDisplayInfo(virtio_display)) + } + Err(err) => { + error!("Failed to get display info: {:?}", err); + Err(ErrUnspec) + } + } + } else { + error!("{cmd:?} Failed to get GPU backend"); + Err(ErrUnspec) + } + } + GpuCommand::GetEdid(virtio_gpu_get_edid { scanout, .. }) => { + let edid_req: VhostUserGpuEdidRequest = VhostUserGpuEdidRequest { + scanout_id: scanout, + }; + if let Some(gpu_backend) = self.gpu_backend.as_mut() { + virtio_gpu.get_edid(gpu_backend, edid_req) + } else { + error!("{cmd:?} Failed to get GPU backend"); + Err(ErrUnspec) + } + } + GpuCommand::ResourceCreate2d(virtio_gpu_resource_create_2d { + resource_id, + format, + width, + height, + }) => { + let resource_create_3d = ResourceCreate3D { + target: RUTABAGA_PIPE_TEXTURE_2D, + format, + bind: RUTABAGA_PIPE_BIND_RENDER_TARGET, + width, + height, + depth: 1, + array_size: 1, + last_level: 0, + nr_samples: 0, + flags: 0, + }; + + virtio_gpu.resource_create_3d(resource_id, resource_create_3d) + } + GpuCommand::ResourceUnref(virtio_gpu_resource_unref { resource_id, .. }) => { + virtio_gpu.unref_resource(resource_id) + } + GpuCommand::SetScanout(virtio_gpu_set_scanout { + r, + scanout_id, + resource_id, + }) => { + if let Some(gpu_backend) = self.gpu_backend.as_mut() { + virtio_gpu.set_scanout(gpu_backend, scanout_id, resource_id, r.into()) + } else { + error!("{cmd:?} Failed to get GPU backend"); + Err(ErrUnspec) + } + } + GpuCommand::ResourceFlush(virtio_gpu_resource_flush { resource_id, r, .. }) => { + if let Some(gpu_backend) = self.gpu_backend.as_mut() { + virtio_gpu.flush_resource(resource_id, gpu_backend, r.into()) + } else { + error!("{cmd:?} Failed to get GPU backend"); + Err(ErrUnspec) + } + } + GpuCommand::TransferToHost2d(virtio_gpu_transfer_to_host_2d { + resource_id, + r: + virtio_gpu_rect { + x, + y, + width, + height, + }, + offset, + .. + }) => { + let transfer = Transfer3D::new_2d(x, y, width, height, offset); + virtio_gpu.transfer_write(0, resource_id, transfer) + } + GpuCommand::ResourceAttachBacking( + virtio_gpu_resource_attach_backing { resource_id, .. }, + iovecs, + ) => virtio_gpu.attach_backing(resource_id, mem, iovecs), + GpuCommand::ResourceDetachBacking(virtio_gpu_resource_detach_backing { + resource_id, + .. + }) => virtio_gpu.detach_backing(resource_id), + GpuCommand::UpdateCursor(virtio_gpu_update_cursor { + pos: + virtio_gpu_cursor_pos { + scanout_id, x, y, .. + }, + resource_id, + hot_x, + hot_y, + .. + }) => { + if let Some(gpu_backend) = self.gpu_backend.as_mut() { + let cursor_pos = VhostUserGpuCursorPos { scanout_id, x, y }; + virtio_gpu.update_cursor(resource_id, gpu_backend, cursor_pos, hot_x, hot_y) + } else { + error!("{cmd:?} Failed to get GPU backend"); + Err(ErrUnspec) + } + } + GpuCommand::MoveCursor(virtio_gpu_update_cursor { + pos: + virtio_gpu_cursor_pos { + scanout_id, x, y, .. + }, + resource_id, + .. + }) => { + if let Some(gpu_backend) = self.gpu_backend.as_mut() { + let cursor = VhostUserGpuCursorPos { scanout_id, x, y }; + virtio_gpu.move_cursor(resource_id, gpu_backend, cursor) + } else { + error!("{cmd:?} Failed to get GPU backend"); + Err(ErrUnspec) + } + } + GpuCommand::ResourceAssignUuid(virtio_gpu_resource_assign_uuid { + resource_id, .. 
+ }) => virtio_gpu.resource_assign_uuid(resource_id), + GpuCommand::GetCapsetInfo(virtio_gpu_get_capset_info { capset_index, .. }) => { + virtio_gpu.get_capset_info(capset_index) + } + GpuCommand::GetCapset(virtio_gpu_get_capset { + capset_id, + capset_version, + }) => virtio_gpu.get_capset(capset_id, capset_version), + + GpuCommand::CtxCreate(virtio_gpu_ctx_create { + context_init, + debug_name, + .. + }) => { + let context_name: Option = String::from_utf8(debug_name.to_vec()).ok(); + virtio_gpu.create_context(hdr.ctx_id, context_init, context_name.as_deref()) + } + GpuCommand::CtxDestroy(_info) => virtio_gpu.destroy_context(hdr.ctx_id), + GpuCommand::CtxAttachResource(virtio_gpu_ctx_resource { resource_id, .. }) => { + virtio_gpu.context_attach_resource(hdr.ctx_id, resource_id) + } + GpuCommand::CtxDetachResource(virtio_gpu_ctx_resource { resource_id, .. }) => { + virtio_gpu.context_detach_resource(hdr.ctx_id, resource_id) + } + GpuCommand::ResourceCreate3d(virtio_gpu_resource_create_3d { + resource_id, + target, + format, + bind, + width, + height, + depth, + array_size, + last_level, + nr_samples, + flags, + .. + }) => { + let resource_create_3d = ResourceCreate3D { + target, + format, + bind, + width, + height, + depth, + array_size, + last_level, + nr_samples, + flags, + }; + + virtio_gpu.resource_create_3d(resource_id, resource_create_3d) + } + GpuCommand::TransferToHost3d(virtio_gpu_transfer_host_3d { + box_: virtio_gpu_box { x, y, z, w, h, d }, + offset, + resource_id, + level, + stride, + layer_stride, + }) => { + let ctx_id = hdr.ctx_id; + + let transfer = Transfer3D { + x, + y, + z, + w, + h, + d, + level, + stride, + layer_stride, + offset, + }; + + virtio_gpu.transfer_write(ctx_id, resource_id, transfer) + } + GpuCommand::TransferFromHost3d(virtio_gpu_transfer_host_3d { + box_: virtio_gpu_box { x, y, z, w, h, d }, + offset, + resource_id, + level, + stride, + layer_stride, + }) => { + let ctx_id = hdr.ctx_id; + + let transfer = Transfer3D { + x, + y, + z, + w, + h, + d, + level, + stride, + layer_stride, + offset, + }; + + virtio_gpu.transfer_read(ctx_id, resource_id, transfer, None) + } + GpuCommand::CmdSubmit3d { + fence_ids, + mut cmd_data, + } => virtio_gpu.submit_command(hdr.ctx_id, &mut cmd_data, &fence_ids), + GpuCommand::ResourceCreateBlob(_info) => { + panic!("virtio_gpu: GpuCommand::ResourceCreateBlob unimplemented"); + } + GpuCommand::SetScanoutBlob(_info) => { + panic!("virtio_gpu: GpuCommand::SetScanoutBlob unimplemented"); + } + GpuCommand::ResourceMapBlob(_info) => { + panic!("virtio_gpu: GpuCommand::ResourceMapBlob unimplemented"); + } + GpuCommand::ResourceUnmapBlob(_info) => { + panic!("virtio_gpu: GpuCommand::ResourceUnmapBlob unimplemented"); + } + } + } + + fn process_queue_chain( + &mut self, + virtio_gpu: &mut RutabagaVirtioGpu, + vring: &VringRwLock, + head_index: u16, + reader: &mut Reader, + writer: &mut Writer, + signal_used_queue: &mut bool, + ) -> Result<()> { + let mut response = ErrUnspec; + let mem = self.mem.as_ref().unwrap().memory().into_inner(); + + let ctrl_hdr = match GpuCommand::decode(reader) { + Ok((ctrl_hdr, gpu_cmd)) => { + // TODO: consider having a method that return &'static str for logging purpose + let cmd_name = format!("{:?}", gpu_cmd); + let response_result = self.process_gpu_command(virtio_gpu, &mem, ctrl_hdr, gpu_cmd); + // Unwrap the response from inside Result and log information + response = match response_result { + Ok(response) => response, + Err(response) => { + debug!("GpuCommand {cmd_name} failed: {response:?}"); + 
response + } + }; + Some(ctrl_hdr) + } + Err(e) => { + warn!("Failed to decode GpuCommand: {e}"); + None + } + }; + + if writer.available_bytes() == 0 { + debug!("Command does not have descriptors for a response"); + vring.add_used(head_index, 0).map_err(Error::QueueAddUsed)?; + *signal_used_queue = true; + return Ok(()); + } + + let mut fence_id = 0; + let mut ctx_id = 0; + let mut flags = 0; + let mut ring_idx = 0; + + if let Some(ctrl_hdr) = ctrl_hdr { + if ctrl_hdr.flags & VIRTIO_GPU_FLAG_FENCE != 0 { + flags = ctrl_hdr.flags; + fence_id = ctrl_hdr.fence_id; + ctx_id = ctrl_hdr.ctx_id; + ring_idx = ctrl_hdr.ring_idx; + + let fence = RutabagaFence { + flags, + fence_id, + ctx_id, + ring_idx, + }; + if let Err(fence_response) = virtio_gpu.create_fence(fence) { + warn!("Failed to create fence: fence_id: {fence_id} fence_response: {fence_response}"); + response = fence_response; + } + } + } + + // Prepare the response now, even if it is going to wait until + // fence is complete. + let response_len = response + .encode(flags, fence_id, ctx_id, ring_idx, writer) + .map_err(Error::GpuResponseEncode)?; + + let mut add_to_queue = true; + if flags & VIRTIO_GPU_FLAG_FENCE != 0 { + let ring = match flags & VIRTIO_GPU_FLAG_INFO_RING_IDX { + 0 => VirtioGpuRing::Global, + _ => VirtioGpuRing::ContextSpecific { ctx_id, ring_idx }, + }; + debug!("Trying to process_fence for the command"); + add_to_queue = virtio_gpu.process_fence(ring, fence_id, head_index, response_len); + } + + if add_to_queue { + vring + .add_used(head_index, response_len) + .map_err(Error::QueueAddUsed)?; + trace!("add_used {}bytes", response_len); + *signal_used_queue = true; + } + Ok(()) + } + + /// Process the requests in the vring and dispatch replies + fn process_queue( + &mut self, + virtio_gpu: &mut RutabagaVirtioGpu, + vring: &VringRwLock, + ) -> Result<()> { + let mem = self.mem.as_ref().unwrap().memory().into_inner(); + let desc_chains: Vec<_> = vring + .get_mut() + .get_queue_mut() + .iter(mem.clone()) + .map_err(Error::CreateIteratorDescChain)? + .collect(); + + let mut signal_used_queue = false; + for desc_chain in desc_chains { + let head_index = desc_chain.head_index(); + let mut reader = desc_chain + .clone() + .reader(&mem) + .map_err(Error::CreateReader)?; + let mut writer = desc_chain.writer(&mem).map_err(Error::CreateWriter)?; + + self.process_queue_chain( + virtio_gpu, + vring, + head_index, + &mut reader, + &mut writer, + &mut signal_used_queue, + )?; + } + + if signal_used_queue { + debug!("Notifying used queue"); + vring + .signal_used_queue() + .map_err(Error::NotificationFailed)?; + } + debug!("Processing control queue finished"); + + Ok(()) + } + + fn handle_event( + &mut self, + device_event: u16, + virtio_gpu: &mut RutabagaVirtioGpu, + vrings: &[VringRwLock], + ) -> IoResult<()> { + match device_event { + CONTROL_QUEUE | CURSOR_QUEUE => { + let vring = &vrings + .get(device_event as usize) + .ok_or_else(|| Error::HandleEventUnknown)?; + if self.event_idx { + // vm-virtio's Queue implementation only checks avail_index + // once, so to properly support EVENT_IDX we need to keep + // calling process_queue() until it stops finding new + // requests on the queue. + loop { + vring.disable_notification().unwrap(); + self.process_queue(virtio_gpu, vring)?; + if !vring.enable_notification().unwrap() { + break; + } + } + } else { + // Without EVENT_IDX, a single call is enough. 
+ self.process_queue(virtio_gpu, vring)?; + } + } + POLL_EVENT => { + trace!("Handling POLL_EVENT"); + virtio_gpu.event_poll() + } + _ => { + warn!("unhandled device_event: {}", device_event); + return Err(Error::HandleEventUnknown.into()); + } + } + + Ok(()) + } + + fn lazy_init_and_handle_event( + &mut self, + device_event: u16, + evset: EventSet, + vrings: &[VringRwLock], + _thread_id: usize, + ) -> IoResult<Option<EventFd>> { + // We use thread_local here because it is the easiest way to handle VirtioGpu being !Send + thread_local! { + static VIRTIO_GPU_REF: RefCell<Option<RutabagaVirtioGpu>> = const { RefCell::new(None) }; + } + + debug!("Handle event called"); + if evset != EventSet::IN { + return Err(Error::HandleEventNotEpollIn.into()); + }; + + let mut event_poll_fd = None; + VIRTIO_GPU_REF.with_borrow_mut(|maybe_virtio_gpu| { + // Lazily initialize the virtio_gpu + let virtio_gpu = maybe_virtio_gpu.get_or_insert_with(|| { + // We currently pass the CONTROL_QUEUE vring to RutabagaVirtioGpu, because we only + // expect to process fences for that queue. + let control_vring = &vrings[CONTROL_QUEUE as usize]; + + // VirtioGpu::new can be called once per process (otherwise it panics), + // so if somehow another thread accidentally wants to create another gpu here, + // it will panic anyway + let virtio_gpu = RutabagaVirtioGpu::new(control_vring, self.renderer); + event_poll_fd = virtio_gpu.get_event_poll_fd(); + virtio_gpu + }); + + self.handle_event(device_event, virtio_gpu, vrings) + })?; + + Ok(event_poll_fd) + } + + fn get_config(&self, offset: u32, size: u32) -> Vec<u8> { + let offset = offset as usize; + let size = size as usize; + + let buf = self.virtio_cfg.as_slice(); + + if offset + size > buf.len() { + return Vec::new(); + } + + buf[offset..offset + size].to_vec() + } +} + +/// VhostUserBackend trait methods +impl VhostUserBackend for VhostUserGpuBackend { + type Vring = VringRwLock; + type Bitmap = (); + + fn num_queues(&self) -> usize { + debug!("Num queues called"); + NUM_QUEUES + } + + fn max_queue_size(&self) -> usize { + debug!("Max queues called"); + QUEUE_SIZE + } + + fn features(&self) -> u64 { + 1 << VIRTIO_F_VERSION_1 + | 1 << VIRTIO_F_RING_RESET + | 1 << VIRTIO_F_NOTIFY_ON_EMPTY + | 1 << VIRTIO_RING_F_INDIRECT_DESC + | 1 << VIRTIO_RING_F_EVENT_IDX + | 1 << VIRTIO_GPU_F_VIRGL + | 1 << VIRTIO_GPU_F_EDID + | 1 << VIRTIO_GPU_F_RESOURCE_BLOB + | 1 << VIRTIO_GPU_F_CONTEXT_INIT + | VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() + } + + fn protocol_features(&self) -> VhostUserProtocolFeatures { + debug!("Protocol features called"); + VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::MQ + } + + fn set_event_idx(&self, enabled: bool) { + self.inner.lock().unwrap().event_idx = enabled; + debug!("Event idx set to: {}", enabled); + } + + fn update_memory(&self, mem: GuestMemoryAtomic<GuestMemoryMmap>) -> IoResult<()> { + debug!("Update memory called"); + self.inner.lock().unwrap().mem = Some(mem); + Ok(()) + } + + fn set_gpu_socket(&self, backend: GpuBackend) { + self.inner.lock().unwrap().gpu_backend = Some(backend); + } + + fn get_config(&self, offset: u32, size: u32) -> Vec<u8> { + self.inner.lock().unwrap().get_config(offset, size) + } + + fn exit_event(&self, _thread_index: usize) -> Option<EventFd> { + self.inner.lock().unwrap().exit_event.try_clone().ok() + } + + fn handle_event( + &self, + device_event: u16, + evset: EventSet, + vrings: &[Self::Vring], + thread_id: usize, + ) -> IoResult<()> { + let poll_event_fd = self.inner.lock().unwrap().lazy_init_and_handle_event( + device_event, + evset, + vrings, + thread_id, + )?;
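The EVENT_IDX branch above is a standard vhost-user idiom worth isolating: after draining the queue, the device re-enables notifications, and if `enable_notification()` reports that new descriptors appeared during the window where notifications were suppressed, it must loop and drain again; otherwise that work would only be noticed on the next (possibly suppressed) guest kick. A toy sketch of the control flow, with a counter standing in for the vring (types here are illustrative only):

```rust
use std::cell::Cell;

// Toy stand-in for a vring; only the notification logic matters here.
struct ToyRing {
    pending: Cell<u32>,
}

impl ToyRing {
    fn disable_notification(&self) {}

    // Re-enable notifications and report whether new descriptors arrived
    // while they were disabled (true means: drain again).
    fn enable_notification(&self) -> bool {
        self.pending.get() > 0
    }

    // Drain whatever is currently visible on the ring.
    fn process_queue(&self) {
        self.pending.set(self.pending.get().saturating_sub(16));
    }
}

fn drain(ring: &ToyRing, event_idx: bool) {
    if event_idx {
        loop {
            ring.disable_notification();
            ring.process_queue();
            if !ring.enable_notification() {
                break;
            }
        }
    } else {
        // Without EVENT_IDX every submission kicks us, so one pass suffices.
        ring.process_queue();
    }
}

fn main() {
    let ring = ToyRing { pending: Cell::new(40) };
    drain(&ring, true);
    assert_eq!(ring.pending.get(), 0);
}
```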
if let Some(poll_event_fd) = poll_event_fd { + let epoll_handler = match self.epoll_handler.lock() { + Ok(h) => h, + Err(poisoned) => poisoned.into_inner(), + }; + let epoll_handler = match epoll_handler.upgrade() { + Some(handler) => handler, + None => { + return Err( + Error::EpollHandler("Failed to upgrade epoll handler".to_string()).into(), + ); + } + }; + epoll_handler + .register_listener(poll_event_fd.as_raw_fd(), EventSet::IN, POLL_EVENT as u64) + .map_err(Error::RegisterEpollListener)?; + debug!("Registered POLL_EVENT on fd: {}", poll_event_fd.as_raw_fd()); + // store the fd, so it is not closed after exiting this scope + self.poll_event_fd.lock().unwrap().replace(poll_event_fd); + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::protocol::*; + use rutabaga_gfx::{RutabagaBuilder, RutabagaComponentType, RutabagaHandler}; + use std::{ + collections::BTreeMap, + mem::size_of, + sync::{Arc, Mutex}, + }; + use vhost_user_backend::{VringRwLock, VringT}; + use virtio_bindings::bindings::virtio_ring::{VRING_DESC_F_NEXT, VRING_DESC_F_WRITE}; + use virtio_queue::{mock::MockSplitQueue, Descriptor, DescriptorChain, Queue}; + use vm_memory::{ + Address, ByteValued, Bytes, GuestAddress, GuestMemoryAtomic, GuestMemoryLoadGuard, + GuestMemoryMmap, + }; + + type GpuDescriptorChain = DescriptorChain>>; + + const SOCKET_PATH: &str = "vgpu.socket"; + + #[derive(Copy, Clone, Default)] + #[repr(C)] + struct VirtioGpuOutHdr { + a: u16, + b: u16, + c: u32, + } + + // SAFETY: The layout of the structure is fixed and can be initialized by + // reading its content from byte array. + unsafe impl ByteValued for VirtioGpuOutHdr {} + + #[derive(Copy, Clone, Default)] + #[repr(C)] + struct VirtioGpuInHdr { + d: u8, + } + + // SAFETY: The layout of the structure is fixed and can be initialized by + // reading its content from byte array. 
+ unsafe impl ByteValued for VirtioGpuInHdr {} + + fn init() -> ( + Arc, + GuestMemoryAtomic, + VringRwLock, + ) { + let backend = VhostUserGpuBackend::new(GpuConfig::new( + SOCKET_PATH.into(), + GpuMode::ModeVirglRenderer, + )) + .unwrap(); + let mem = GuestMemoryAtomic::new( + GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap(), + ); + let vring = VringRwLock::new(mem.clone(), 16).unwrap(); + vring.set_queue_info(0x100, 0x200, 0x300).unwrap(); + vring.set_queue_ready(true); + + (backend, mem, vring) + } + + // Prepares a single chain of descriptors + fn prepare_descriptors( + mut next_addr: u64, + mem: &GuestMemoryLoadGuard>, + buf: &mut Vec, + cmd_type: u32, + ) -> Vec { + let mut descriptors = Vec::new(); + let mut index = 0; + + // Gpu header descriptor + let ctrl_hdr = virtio_gpu_ctrl_hdr { + type_: cmd_type, + ..virtio_gpu_ctrl_hdr::default() + }; + + let desc_out = Descriptor::new( + next_addr, + size_of::() as u32, + VRING_DESC_F_NEXT as u16, + index + 1, + ); + next_addr += desc_out.len() as u64; + index += 1; + + mem.write_obj::(ctrl_hdr, desc_out.addr()) + .unwrap(); + descriptors.push(desc_out); + + // Buf descriptor: optional + if !buf.is_empty() { + let desc_buf = Descriptor::new( + next_addr, + buf.len() as u32, + (VRING_DESC_F_WRITE | VRING_DESC_F_NEXT) as u16, + index + 1, + ); + next_addr += desc_buf.len() as u64; + + mem.write(buf, desc_buf.addr()).unwrap(); + descriptors.push(desc_buf); + } + + // In response descriptor + let desc_in = Descriptor::new( + next_addr, + size_of::() as u32, + VRING_DESC_F_WRITE as u16, + 0, + ); + descriptors.push(desc_in); + descriptors + } + + // Prepares a single chain of descriptors + fn prepare_desc_chain( + buf: &mut Vec, + cmd_type: u32, + ) -> (Arc, VringRwLock) { + let (backend, mem, vring) = init(); + let mem_handle = mem.memory(); + let vq = MockSplitQueue::new(&*mem_handle, 16); + let next_addr = vq.desc_table().total_size() + 0x100; + + let descriptors = prepare_descriptors(next_addr, &mem_handle, buf, cmd_type); + + vq.build_desc_chain(&descriptors).unwrap(); + + // Put the descriptor index 0 in the first available ring position. + mem_handle + .write_obj(0u16, vq.avail_addr().unchecked_add(4)) + .unwrap(); + + // Set `avail_idx` to 1. + mem_handle + .write_obj(1u16, vq.avail_addr().unchecked_add(2)) + .unwrap(); + + vring.set_queue_size(16); + vring + .set_queue_info(vq.desc_table_addr().0, vq.avail_addr().0, vq.used_addr().0) + .unwrap(); + vring.set_queue_ready(true); + + backend.update_memory(mem).unwrap(); + + (backend, vring) + } + + // Prepares a chain of descriptors + fn prepare_desc_chains( + mem: &GuestMemoryAtomic, + buf: &mut Vec, + cmd_type: u32, + ) -> GpuDescriptorChain { + let mem_handle = mem.memory(); + let vq = MockSplitQueue::new(&*mem_handle, 16); + let next_addr = vq.desc_table().total_size() + 0x100; + + let descriptors = prepare_descriptors(next_addr, &mem_handle, buf, cmd_type); + + for (idx, desc) in descriptors.iter().enumerate() { + vq.desc_table().store(idx as u16, *desc).unwrap(); + } + + // Put the descriptor index 0 in the first available ring position. + mem_handle + .write_obj(0u16, vq.avail_addr().unchecked_add(4)) + .unwrap(); + + // Set `avail_idx` to 1. 
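+ // Split-ring layout refresher: the available ring is flags (u16) at offset 0, idx (u16) at offset 2, then the ring entries (one u16 per slot) from offset 4 onwards, which is why the head index is written at avail_addr + 4 and the new avail_idx at avail_addr + 2.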
+ mem_handle + .write_obj(1u16, vq.avail_addr().unchecked_add(2)) + .unwrap(); + + // Create descriptor chain from pre-filled memory + vq.create_queue::() + .unwrap() + .iter(mem_handle) + .unwrap() + .next() + .unwrap() + } + + fn new_2d() -> RutabagaVirtioGpu { + let rutabaga = RutabagaBuilder::new(RutabagaComponentType::Rutabaga2D, 0) + .build(RutabagaHandler::new(|_| {}), None) + .unwrap(); + RutabagaVirtioGpu { + rutabaga, + resources: BTreeMap::default(), + fence_state: Arc::new(Mutex::new(Default::default())), + scanouts: Default::default(), + } + } + + #[test] + fn test_process_queue_chain() { + let (backend, mem, _) = init(); + backend.update_memory(mem.clone()).unwrap(); + let mut backend_inner = backend.inner.lock().unwrap(); + + let vring = VringRwLock::new(mem.clone(), 0x1000).unwrap(); + vring.set_queue_info(0x100, 0x200, 0x300).unwrap(); + vring.set_queue_ready(true); + + let mut buf: Vec = vec![0; 30]; + let command_types = [ + VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, + VIRTIO_GPU_CMD_RESOURCE_UNREF, + VIRTIO_GPU_CMD_SET_SCANOUT, + VIRTIO_GPU_CMD_SET_SCANOUT_BLOB, + VIRTIO_GPU_CMD_RESOURCE_FLUSH, + VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D, + VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING, + VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING, + VIRTIO_GPU_CMD_GET_CAPSET, + VIRTIO_GPU_CMD_GET_CAPSET_INFO, + VIRTIO_GPU_CMD_GET_EDID, + VIRTIO_GPU_CMD_CTX_CREATE, + VIRTIO_GPU_CMD_CTX_DESTROY, + VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, + VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE, + VIRTIO_GPU_CMD_RESOURCE_CREATE_3D, + VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, + VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D, + VIRTIO_GPU_CMD_SUBMIT_3D, + VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB, + VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB, + VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB, + VIRTIO_GPU_CMD_UPDATE_CURSOR, + VIRTIO_GPU_CMD_MOVE_CURSOR, + VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID, + ]; + for cmd_type in command_types { + let desc_chain = prepare_desc_chains(&mem, &mut buf, cmd_type); + let mem = mem.memory().into_inner(); + + let mut reader = desc_chain + .clone() + .reader(&mem) + .map_err(Error::CreateReader) + .unwrap(); + let mut writer = desc_chain + .clone() + .writer(&mem) + .map_err(Error::CreateWriter) + .unwrap(); + + let mut virtio_gpu = new_2d(); + let mut signal_used_queue = true; + + backend_inner + .process_queue_chain( + &mut virtio_gpu, + &vring, + desc_chain.head_index(), + &mut reader, + &mut writer, + &mut signal_used_queue, + ) + .unwrap(); + } + } + + #[test] + fn test_process_queue() { + // Test process_queue functionality + let mut buf: Vec = vec![0; 30]; + let (backend, vring) = prepare_desc_chain(&mut buf, 0); + let mut backend_inner = backend.inner.lock().unwrap(); + + let mut virtio_gpu = new_2d(); + backend_inner + .process_queue(&mut virtio_gpu, &vring) + .unwrap(); + } + + #[test] + #[ignore = "This test needs to modified to mock GpuBackend"] + fn test_process_gpu_command() { + let (backend, mem, _) = init(); + let mut backend_inner = backend.inner.lock().unwrap(); + + backend_inner.mem = Some(mem.clone()); + let mem = mem.memory().into_inner(); + let mut virtio_gpu = new_2d(); + let hdr = virtio_gpu_ctrl_hdr::default(); + let gpu_cmd = [ + GpuCommand::ResourceCreate2d(virtio_gpu_resource_create_2d::default()), + GpuCommand::ResourceUnref(virtio_gpu_resource_unref::default()), + GpuCommand::ResourceFlush(virtio_gpu_resource_flush::default()), + GpuCommand::GetCapset(virtio_gpu_get_capset::default()), + GpuCommand::ResourceCreate3d(virtio_gpu_resource_create_3d::default()), + ]; + for cmd in gpu_cmd { + backend_inner + .process_gpu_command(&mut 
virtio_gpu, &mem, hdr, cmd) + .unwrap(); + } + } + + #[test] + fn test_process_gpu_command_failure() { + let (backend, mem, _) = init(); + let mut backend_inner = backend.inner.lock().unwrap(); + backend_inner.mem = Some(mem.clone()); + + let mem = mem.memory().into_inner(); + let mut virtio_gpu = new_2d(); + let hdr = virtio_gpu_ctrl_hdr::default(); + let gpu_cmd = [ + GpuCommand::TransferToHost2d(virtio_gpu_transfer_to_host_2d::default()), + GpuCommand::TransferToHost3d(virtio_gpu_transfer_host_3d::default()), + GpuCommand::ResourceDetachBacking(virtio_gpu_resource_detach_backing::default()), + GpuCommand::GetCapsetInfo(virtio_gpu_get_capset_info::default()), + GpuCommand::CtxCreate(virtio_gpu_ctx_create::default()), + GpuCommand::CtxAttachResource(virtio_gpu_ctx_resource::default()), + GpuCommand::CtxDetachResource(virtio_gpu_ctx_resource::default()), + GpuCommand::CtxDestroy(virtio_gpu_ctx_destroy::default()), + GpuCommand::ResourceAssignUuid(virtio_gpu_resource_assign_uuid::default()), + GpuCommand::ResourceAttachBacking( + virtio_gpu_resource_attach_backing::default(), + [(GuestAddress(0), 0x1000)].to_vec(), + ), + GpuCommand::CmdSubmit3d { + cmd_data: Vec::new(), + fence_ids: Vec::new(), + }, + ]; + for cmd in gpu_cmd { + backend_inner + .process_gpu_command(&mut virtio_gpu, &mem, hdr, cmd) + .unwrap_err(); + } + } + + #[test] + fn test_verify_backend() { + let gpu_config = GpuConfig::new(SOCKET_PATH.into(), GpuMode::ModeVirglRenderer); + let backend = VhostUserGpuBackend::new(gpu_config).unwrap(); + + assert_eq!(backend.num_queues(), NUM_QUEUES); + assert_eq!(backend.max_queue_size(), QUEUE_SIZE); + assert_eq!(backend.features(), 0x1017100001B); + assert_eq!( + backend.protocol_features(), + VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::MQ + ); + assert_eq!(backend.queues_per_thread(), vec![0xffff_ffff]); + assert_eq!(backend.get_config(0, 0), vec![]); + + backend.set_event_idx(true); + assert!(backend.inner.lock().unwrap().event_idx); + + assert!(backend.exit_event(0).is_some()); + + let mem = GuestMemoryAtomic::new( + GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap(), + ); + backend.update_memory(mem.clone()).unwrap(); + + let vring = VringRwLock::new(mem, 0x1000).unwrap(); + vring.set_queue_info(0x100, 0x200, 0x300).unwrap(); + vring.set_queue_ready(true); + + assert_eq!( + backend + .handle_event(0, EventSet::OUT, &[vring.clone()], 0) + .unwrap_err() + .kind(), + io::ErrorKind::Other + ); + + assert_eq!( + backend + .handle_event(1, EventSet::IN, &[vring.clone()], 0) + .unwrap_err() + .kind(), + io::ErrorKind::Other + ); + + // Hit the loop part + backend.set_event_idx(true); + backend + .handle_event(0, EventSet::IN, &[vring.clone()], 0) + .unwrap(); + + // Hit the non-loop part + backend.set_event_idx(false); + backend.handle_event(0, EventSet::IN, &[vring], 0).unwrap(); + } + + #[test] + fn test_gpu_command_encode() { + let (backend, mem, _) = init(); + backend.update_memory(mem.clone()).unwrap(); + + let mut buf: Vec = vec![0; 2048]; + let desc_chain = prepare_desc_chains(&mem, &mut buf, 0); + + let mem = mem.memory(); + + let mut writer = desc_chain + .clone() + .writer(&mem) + .map_err(Error::CreateWriter) + .unwrap(); + + let resp = GpuResponse::OkNoData; + let resp_ok_nodata = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_ok_nodata, 24); + + let resp = GpuResponse::OkDisplayInfo(vec![(0, 0, false)]); + let resp_display_info = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + 
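+ // These expected lengths follow from the virtio-gpu wire format: virtio_gpu_ctrl_hdr is 24 bytes and each virtio_gpu_display_one entry is 24 bytes, so OkDisplayInfo always encodes the full 16-scanout array, 24 + 16 * 24 = 408 bytes; OkEdid is 24 (hdr) + 4 (size) + 4 (padding) + 1024 (blob) = 1056; OkNoData is just the 24-byte header.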
assert_eq!(resp_display_info, 408); + + let edid_data: Box<[u8]> = Box::new([0u8; 1024]); + let resp = GpuResponse::OkEdid { blob: edid_data }; + let resp_edid = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_edid, 1056); + + let resp = GpuResponse::OkCapset(vec![]); + let resp_capset = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_capset, 24); + + let resp = GpuResponse::OkCapsetInfo { + capset_id: 0, + version: 0, + size: 0, + }; + let resp_capset = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_capset, 40); + + let resp = GpuResponse::OkResourcePlaneInfo { + format_modifier: 0, + plane_info: vec![], + }; + let resp_resource_planeinfo = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_resource_planeinfo, 72); + + let resp = GpuResponse::OkResourceUuid { uuid: [0u8; 16] }; + let resp_resource_uuid = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_resource_uuid, 40); + + let resp = GpuResponse::OkMapInfo { map_info: 0 }; + let resp_map_info = GpuResponse::encode(&resp, 0, 0, 0, 0, &mut writer).unwrap(); + assert_eq!(resp_map_info, 32); + } +} diff --git a/staging/vhost-device-gpu/src/lib.rs b/staging/vhost-device-gpu/src/lib.rs new file mode 100644 index 00000000..ef5242c4 --- /dev/null +++ b/staging/vhost-device-gpu/src/lib.rs @@ -0,0 +1,89 @@ +// Copyright 2024 Red Hat Inc +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +pub mod device; +pub mod protocol; +pub mod virtio_gpu; + +use std::path::PathBuf; + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum GpuMode { + ModeVirglRenderer, + ModeGfxstream, +} + +#[derive(Debug, Clone)] +/// This structure is the public API through which an external program +/// is allowed to configure the backend. +pub struct GpuConfig { + /// vhost-user Unix domain socket + socket_path: PathBuf, + renderer: GpuMode, +} + +impl GpuConfig { + /// Create a new instance of the GpuConfig struct, containing the + /// parameters to be fed into the gpu-backend server. + pub const fn new(socket_path: PathBuf, renderer: GpuMode) -> Self { + Self { + socket_path, + renderer, + } + } + + /// Return the path of the unix domain socket which is listening to + /// requests from the guest. + pub fn get_socket_path(&self) -> PathBuf { + PathBuf::from(&self.socket_path.clone()) + } + + pub fn get_renderer(&self) -> GpuMode { + self.renderer + } +} + +#[derive(Debug)] +pub enum GpuError { + /// Failed to create event fd. + EventFd(std::io::Error), + /// Failed to decode incoming command. + DecodeCommand(std::io::Error), + /// Error writing to the Queue. 
+ WriteDescriptor(std::io::Error), + /// Error reading Guest Memory, + GuestMemory, +} + +#[cfg(test)] +mod tests { + use super::*; + use assert_matches::assert_matches; + use std::io; + + #[test] + fn test_gpu_config() { + // Test the creation of GpuConfig struct + let socket_path = PathBuf::from("/tmp/socket"); + let gpu_config = GpuConfig::new(socket_path.clone(), GpuMode::ModeVirglRenderer); + assert_eq!(gpu_config.get_socket_path(), socket_path); + } + + #[test] + fn test_gpu_error() { + // Test GPU error variants + let event_fd_error = GpuError::EventFd(io::Error::from(io::ErrorKind::NotFound)); + assert_matches!(event_fd_error, GpuError::EventFd(_)); + + let decode_error = GpuError::DecodeCommand(io::Error::from(io::ErrorKind::InvalidData)); + assert_matches!(decode_error, GpuError::DecodeCommand(_)); + + let write_error = + GpuError::WriteDescriptor(io::Error::from(io::ErrorKind::PermissionDenied)); + assert_matches!(write_error, GpuError::WriteDescriptor(_)); + + let guest_memory_error = GpuError::GuestMemory; + assert_matches!(guest_memory_error, GpuError::GuestMemory); + } +} diff --git a/staging/vhost-device-gpu/src/main.rs b/staging/vhost-device-gpu/src/main.rs new file mode 100644 index 00000000..35a570a3 --- /dev/null +++ b/staging/vhost-device-gpu/src/main.rs @@ -0,0 +1,132 @@ +// VIRTIO GPU Emulation via vhost-user +// +// Copyright 2024 Red Hat Inc +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +use log::{error, info}; +use std::{path::PathBuf, process::exit}; + +use clap::{Parser, ValueEnum}; +use thiserror::Error as ThisError; +use vhost_device_gpu::{ + device::{self, VhostUserGpuBackend}, + GpuConfig, GpuMode, +}; +use vhost_user_backend::VhostUserDaemon; +use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap}; + +type Result = std::result::Result; + +#[derive(Debug, ThisError)] +pub(crate) enum Error { + #[error("Could not create backend: {0}")] + CouldNotCreateBackend(device::Error), + #[error("Could not create daemon: {0}")] + CouldNotCreateDaemon(vhost_user_backend::Error), + #[error("Fatal error: {0}")] + ServeFailed(vhost_user_backend::Error), +} + +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +struct GpuArgs { + /// vhost-user Unix domain socket. 
+ #[clap(short, long, value_name = "SOCKET")] + socket_path: PathBuf, + #[clap(short, long, value_enum)] + renderer: RenderMode, +} + +#[derive(Debug, Clone, ValueEnum)] +enum RenderMode { + Gfxstream, + Virglrenderer, +} + +impl From<RenderMode> for GpuMode { + fn from(mode: RenderMode) -> Self { + match mode { + RenderMode::Gfxstream => GpuMode::ModeGfxstream, + RenderMode::Virglrenderer => GpuMode::ModeVirglRenderer, + } + } +} + +impl TryFrom<GpuArgs> for GpuConfig { + type Error = Error; + + fn try_from(args: GpuArgs) -> Result<Self> { + let socket_path = args.socket_path; + let renderer: GpuMode = args.renderer.into(); + + Ok(GpuConfig::new(socket_path, renderer)) + } +} + +fn start_backend(config: GpuConfig) -> Result<()> { + info!("Starting backend"); + let socket = config.get_socket_path(); + let backend = VhostUserGpuBackend::new(config).map_err(Error::CouldNotCreateBackend)?; + + let mut daemon = VhostUserDaemon::new( + "vhost-device-gpu-backend".to_string(), + backend.clone(), + GuestMemoryAtomic::new(GuestMemoryMmap::new()), + ) + .map_err(Error::CouldNotCreateDaemon)?; + + backend.set_epoll_handler(&daemon.get_epoll_handlers()); + + daemon.serve(socket).map_err(Error::ServeFailed)?; + Ok(()) +} + +fn main() { + env_logger::init(); + + if let Err(e) = start_backend(GpuConfig::try_from(GpuArgs::parse()).unwrap()) { + error!("{e}"); + exit(1); + } +} + +#[cfg(test)] +mod tests { + use assert_matches::assert_matches; + use std::path::Path; + + use super::*; + + impl GpuArgs { + pub(crate) fn from_args(path: &Path) -> GpuArgs { + GpuArgs { + socket_path: path.to_path_buf(), + renderer: RenderMode::Gfxstream, + } + } + } + + #[test] + fn test_parse_successful() { + let socket_name = Path::new("vgpu.sock"); + + let cmd_args = GpuArgs::from_args(socket_name); + let config = GpuConfig::try_from(cmd_args); + + assert!(config.is_ok()); + + let config = config.unwrap(); + assert_eq!(config.get_socket_path(), socket_name); + } + + #[test] + fn test_fail_listener() { + // This will fail to create the listener, so start_backend returns ServeFailed.
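+ // (Tilde expansion is a shell feature: the daemon receives the literal path "~/path/not/present/gpu", whose parent directory does not exist, so binding the UNIX socket fails.)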
+ let socket_name = Path::new("~/path/not/present/gpu"); + let cmd_args = GpuArgs::from_args(socket_name); + let config = GpuConfig::try_from(cmd_args).unwrap(); + + assert_matches!(start_backend(config).unwrap_err(), Error::ServeFailed(_)); + } +} diff --git a/staging/vhost-device-gpu/src/protocol.rs b/staging/vhost-device-gpu/src/protocol.rs new file mode 100644 index 00000000..38758263 --- /dev/null +++ b/staging/vhost-device-gpu/src/protocol.rs @@ -0,0 +1,1261 @@ +// Copyright 2024 Red Hat Inc +// Copyright 2019 The ChromiumOS Authors +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +#![allow(non_camel_case_types)] + +use log::trace; +use std::{ + cmp::min, + convert::From, + ffi::CStr, + fmt::{self, Display}, + io::{self, Read, Write}, + marker::PhantomData, + mem::{size_of, size_of_val}, +}; + +use rutabaga_gfx::RutabagaError; +use thiserror::Error; +pub use virtio_bindings::virtio_gpu::{ + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE as VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_CTX_CREATE as VIRTIO_GPU_CMD_CTX_CREATE, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_CTX_DESTROY as VIRTIO_GPU_CMD_CTX_DESTROY, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE as VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_GET_CAPSET as VIRTIO_GPU_CMD_GET_CAPSET, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_GET_CAPSET_INFO as VIRTIO_GPU_CMD_GET_CAPSET_INFO, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_GET_DISPLAY_INFO as VIRTIO_GPU_CMD_GET_DISPLAY_INFO, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_GET_EDID as VIRTIO_GPU_CMD_GET_EDID, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_MOVE_CURSOR as VIRTIO_GPU_CMD_MOVE_CURSOR, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID as VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING as VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_CREATE_2D as VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_CREATE_3D as VIRTIO_GPU_CMD_RESOURCE_CREATE_3D, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB as VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING as VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_FLUSH as VIRTIO_GPU_CMD_RESOURCE_FLUSH, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB as VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB as VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_RESOURCE_UNREF as VIRTIO_GPU_CMD_RESOURCE_UNREF, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_SET_SCANOUT as VIRTIO_GPU_CMD_SET_SCANOUT, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_SET_SCANOUT_BLOB as VIRTIO_GPU_CMD_SET_SCANOUT_BLOB, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_SUBMIT_3D as VIRTIO_GPU_CMD_SUBMIT_3D, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D as VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D as VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D as VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, + virtio_gpu_ctrl_type_VIRTIO_GPU_CMD_UPDATE_CURSOR as VIRTIO_GPU_CMD_UPDATE_CURSOR, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID as VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER as VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID as 
VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID as VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY as VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_ERR_UNSPEC as VIRTIO_GPU_RESP_ERR_UNSPEC, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_CAPSET as VIRTIO_GPU_RESP_OK_CAPSET, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_CAPSET_INFO as VIRTIO_GPU_RESP_OK_CAPSET_INFO, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_DISPLAY_INFO as VIRTIO_GPU_RESP_OK_DISPLAY_INFO, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_EDID as VIRTIO_GPU_RESP_OK_EDID, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_MAP_INFO as VIRTIO_GPU_RESP_OK_MAP_INFO, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_NODATA as VIRTIO_GPU_RESP_OK_NODATA, + virtio_gpu_ctrl_type_VIRTIO_GPU_RESP_OK_RESOURCE_UUID as VIRTIO_GPU_RESP_OK_RESOURCE_UUID, +}; +use virtio_queue::{Reader, Writer}; +use vm_memory::{ByteValued, GuestAddress, Le32}; +use zerocopy::{AsBytes, FromBytes}; + +use crate::device::{self, Error}; + +pub const QUEUE_SIZE: usize = 1024; +pub const NUM_QUEUES: usize = 2; + +pub const CONTROL_QUEUE: u16 = 0; +pub const CURSOR_QUEUE: u16 = 1; +pub const POLL_EVENT: u16 = NUM_QUEUES as u16 + 1; + +pub const VIRTIO_GPU_MAX_SCANOUTS: usize = 16; + +/* CHROMIUM(b/277982577): success responses */ +pub const VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO: u32 = 0x11FF; + +/* Create a OS-specific handle from guest memory (not upstreamed). */ +pub const VIRTIO_GPU_BLOB_FLAG_CREATE_GUEST_HANDLE: u32 = 0x0008; + +pub const VIRTIO_GPU_FLAG_FENCE: u32 = 1 << 0; +pub const VIRTIO_GPU_FLAG_INFO_RING_IDX: u32 = 1 << 1; + +/// Virtio Gpu Configuration +#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)] +#[repr(C)] +pub struct VirtioGpuConfig { + /// Signals pending events to the driver + pub events_read: Le32, + /// Clears pending events in the device + pub events_clear: Le32, + /// Maximum number of scanouts supported by the device + pub num_scanouts: Le32, + /// Maximum number of capability sets supported by the device + pub num_capsets: Le32, +} + +// SAFETY: The layout of the structure is fixed and can be initialized by +// reading its content from byte array. 
+unsafe impl ByteValued for VirtioGpuConfig {} + +#[derive(Debug, PartialEq, Eq)] +pub struct InvalidCommandType(u32); + +impl std::fmt::Display for InvalidCommandType { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(fmt, "Invalid command type {}", self.0) + } +} + +impl From<InvalidCommandType> for crate::device::Error { + fn from(val: InvalidCommandType) -> Self { + Self::InvalidCommandType(val.0) + } +} + +impl std::error::Error for InvalidCommandType {} + +#[derive(Copy, Clone, Debug, Default, AsBytes, FromBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_ctrl_hdr { + pub type_: u32, + pub flags: u32, + pub fence_id: u64, + pub ctx_id: u32, + pub ring_idx: u8, + pub padding: [u8; 3], +} +unsafe impl ByteValued for virtio_gpu_ctrl_hdr {} + +/* data passed in the cursor vq */ + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_cursor_pos { + pub scanout_id: u32, + pub x: u32, + pub y: u32, + pub padding: u32, +} +unsafe impl ByteValued for virtio_gpu_cursor_pos {} + +/* VIRTIO_GPU_CMD_UPDATE_CURSOR, VIRTIO_GPU_CMD_MOVE_CURSOR */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_update_cursor { + pub pos: virtio_gpu_cursor_pos, /* update & move */ + pub resource_id: u32, /* update only */ + pub hot_x: u32, /* update only */ + pub hot_y: u32, /* update only */ + pub padding: u32, +} +unsafe impl ByteValued for virtio_gpu_update_cursor {} + +/* data passed in the control vq, 2d related */ + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_rect { + pub x: u32, + pub y: u32, + pub width: u32, + pub height: u32, +} +unsafe impl ByteValued for virtio_gpu_rect {} + +/* VIRTIO_GPU_CMD_GET_EDID */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_get_edid { + pub scanout: u32, + pub padding: u32, +} +unsafe impl ByteValued for virtio_gpu_get_edid {} + +/* VIRTIO_GPU_CMD_RESOURCE_UNREF */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_unref { + pub resource_id: u32, + pub padding: u32, +} +unsafe impl ByteValued for virtio_gpu_resource_unref {} + +/* VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: create a 2d resource with a format */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_create_2d { + pub resource_id: u32, + pub format: u32, + pub width: u32, + pub height: u32, +} +unsafe impl ByteValued for virtio_gpu_resource_create_2d {} + +/* VIRTIO_GPU_CMD_SET_SCANOUT */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_set_scanout { + pub r: virtio_gpu_rect, + pub scanout_id: u32, + pub resource_id: u32, +} +unsafe impl ByteValued for virtio_gpu_set_scanout {} + +/* VIRTIO_GPU_CMD_RESOURCE_FLUSH */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_flush { + pub r: virtio_gpu_rect, + pub resource_id: u32, + pub padding: u32, +} +unsafe impl ByteValued for virtio_gpu_resource_flush {} + +/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: simple transfer to_host */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_transfer_to_host_2d { + pub r: virtio_gpu_rect, + pub offset: u64, + pub resource_id: u32, + pub padding: u32, +} +unsafe impl 
ByteValued for virtio_gpu_transfer_to_host_2d {} + +#[derive(Copy, Clone, Debug, Default, AsBytes, FromBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_mem_entry { + pub addr: u64, + pub length: u32, + pub padding: u32, +} +unsafe impl ByteValued for virtio_gpu_mem_entry {} + +/* VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_attach_backing { + pub resource_id: u32, + pub nr_entries: u32, +} +unsafe impl ByteValued for virtio_gpu_resource_attach_backing {} + +/* VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_detach_backing { + pub resource_id: u32, + pub padding: u32, +} +unsafe impl ByteValued for virtio_gpu_resource_detach_backing {} + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_display_one { + pub r: virtio_gpu_rect, + pub enabled: u32, + pub flags: u32, +} +unsafe impl ByteValued for virtio_gpu_display_one {} + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes)] +#[repr(C)] +pub struct virtio_gpu_resp_display_info { + pub hdr: virtio_gpu_ctrl_hdr, + pub pmodes: [virtio_gpu_display_one; VIRTIO_GPU_MAX_SCANOUTS], +} +unsafe impl ByteValued for virtio_gpu_resp_display_info {} + +const EDID_BLOB_MAX_SIZE: usize = 1024; + +#[derive(Debug, Copy, Clone)] +#[repr(C)] +pub struct virtio_gpu_resp_edid { + pub hdr: virtio_gpu_ctrl_hdr, + pub size: u32, + pub padding: u32, + pub edid: [u8; EDID_BLOB_MAX_SIZE], +} + +unsafe impl ByteValued for virtio_gpu_resp_edid {} + +/* data passed in the control vq, 3d related */ + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_box { + pub x: u32, + pub y: u32, + pub z: u32, + pub w: u32, + pub h: u32, + pub d: u32, +} +unsafe impl ByteValued for virtio_gpu_box {} + +/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_transfer_host_3d { + pub box_: virtio_gpu_box, + pub offset: u64, + pub resource_id: u32, + pub level: u32, + pub stride: u32, + pub layer_stride: u32, +} +unsafe impl ByteValued for virtio_gpu_transfer_host_3d {} + +/* VIRTIO_GPU_CMD_RESOURCE_CREATE_3D */ +pub const VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP: u32 = 1 << 0; +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_create_3d { + pub resource_id: u32, + pub target: u32, + pub format: u32, + pub bind: u32, + pub width: u32, + pub height: u32, + pub depth: u32, + pub array_size: u32, + pub last_level: u32, + pub nr_samples: u32, + pub flags: u32, + pub padding: u32, +} +unsafe impl ByteValued for virtio_gpu_resource_create_3d {} + +/* VIRTIO_GPU_CMD_CTX_CREATE */ +pub const VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK: u32 = 1 << 0; +#[derive(Copy, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_ctx_create { + pub nlen: u32, + pub context_init: u32, + pub debug_name: [u8; 64], +} +unsafe impl ByteValued for virtio_gpu_ctx_create {} + +impl Default for virtio_gpu_ctx_create { + fn default() -> Self { + unsafe { ::std::mem::zeroed() } + } +} + +impl Clone for virtio_gpu_ctx_create { + fn clone(&self) -> virtio_gpu_ctx_create { + *self + } +} + +impl fmt::Debug for virtio_gpu_ctx_create { + fn fmt(&self, f: &mut 
fmt::Formatter) -> fmt::Result { + let debug_name = CStr::from_bytes_with_nul(&self.debug_name[..min(64, self.nlen as usize)]) + .map_or_else( + |err| format!("Err({})", err), + |c_str| c_str.to_string_lossy().into_owned(), + ); + f.debug_struct(stringify!("virtio_gpu_ctx_create")) + .field("debug_name", &debug_name) + .field("context_init", &self.context_init) + .finish() + } +} + +/* VIRTIO_GPU_CMD_CTX_DESTROY */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_ctx_destroy {} +unsafe impl ByteValued for virtio_gpu_ctx_destroy {} + +/* VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_ctx_resource { + pub resource_id: u32, + pub padding: u32, +} +unsafe impl ByteValued for virtio_gpu_ctx_resource {} + +/* VIRTIO_GPU_CMD_SUBMIT_3D */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_cmd_submit { + pub size: u32, + + // The in-fence IDs are prepended to the cmd_buf and memory layout + // of the VIRTIO_GPU_CMD_SUBMIT_3D buffer looks like this: + // _________________ + // | CMD_SUBMIT_3D | + // ----------------- + // | header | + // | in-fence IDs | + // | cmd_buf | + // ----------------- + // + // This makes in-fence IDs naturally aligned to the sizeof(u64) inside + // of the virtio buffer. + pub num_in_fences: u32, +} +unsafe impl ByteValued for virtio_gpu_cmd_submit {} + +pub const VIRTIO_GPU_CAPSET_VIRGL: u32 = 1; +pub const VIRTIO_GPU_CAPSET_VIRGL2: u32 = 2; +pub const VIRTIO_GPU_CAPSET_GFXSTREAM: u32 = 3; +pub const VIRTIO_GPU_CAPSET_VENUS: u32 = 4; +pub const VIRTIO_GPU_CAPSET_CROSS_DOMAIN: u32 = 5; + +/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_get_capset_info { + pub capset_index: u32, + pub padding: u32, +} +unsafe impl ByteValued for virtio_gpu_get_capset_info {} + +/* VIRTIO_GPU_RESP_OK_CAPSET_INFO */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes)] +#[repr(C)] +pub struct virtio_gpu_resp_capset_info { + pub hdr: virtio_gpu_ctrl_hdr, + pub capset_id: u32, + pub capset_max_version: u32, + pub capset_max_size: u32, + pub padding: u32, +} +unsafe impl ByteValued for virtio_gpu_resp_capset_info {} + +/* VIRTIO_GPU_CMD_GET_CAPSET */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_get_capset { + pub capset_id: u32, + pub capset_version: u32, +} +unsafe impl ByteValued for virtio_gpu_get_capset {} + +/* VIRTIO_GPU_RESP_OK_CAPSET */ +#[derive(Copy, Clone, Debug, Default)] +#[repr(C)] +pub struct virtio_gpu_resp_capset { + pub hdr: virtio_gpu_ctrl_hdr, + pub capset_data: PhantomData<[u8]>, +} +unsafe impl ByteValued for virtio_gpu_resp_capset {} + +/* VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes)] +#[repr(C)] +pub struct virtio_gpu_resp_resource_plane_info { + pub hdr: virtio_gpu_ctrl_hdr, + pub count: u32, + pub padding: u32, + pub format_modifier: u64, + pub strides: [u32; 4], + pub offsets: [u32; 4], +} +unsafe impl ByteValued for virtio_gpu_resp_resource_plane_info {} + +pub const PLANE_INFO_MAX_COUNT: usize = 4; + +pub const VIRTIO_GPU_EVENT_DISPLAY: u32 = 1 << 0; + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_create_blob { + pub 
resource_id: u32, + pub blob_mem: u32, + pub blob_flags: u32, + pub nr_entries: u32, + pub blob_id: u64, + pub size: u64, +} +unsafe impl ByteValued for virtio_gpu_resource_create_blob {} + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_map_blob { + pub resource_id: u32, + pub padding: u32, + pub offset: u64, +} +unsafe impl ByteValued for virtio_gpu_resource_map_blob {} + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_unmap_blob { + pub resource_id: u32, + pub padding: u32, +} +unsafe impl ByteValued for virtio_gpu_resource_unmap_blob {} + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resp_map_info { + pub hdr: virtio_gpu_ctrl_hdr, + pub map_info: u32, + pub padding: u32, +} +unsafe impl ByteValued for virtio_gpu_resp_map_info {} + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_resource_assign_uuid { + pub resource_id: u32, + pub padding: u32, +} +unsafe impl ByteValued for virtio_gpu_resource_assign_uuid {} + +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes)] +#[repr(C)] +pub struct virtio_gpu_resp_resource_uuid { + pub hdr: virtio_gpu_ctrl_hdr, + pub uuid: [u8; 16], +} +unsafe impl ByteValued for virtio_gpu_resp_resource_uuid {} + +/* VIRTIO_GPU_CMD_SET_SCANOUT_BLOB */ +#[derive(Copy, Clone, Debug, Default, FromBytes, AsBytes, PartialEq, Eq)] +#[repr(C)] +pub struct virtio_gpu_set_scanout_blob { + pub r: virtio_gpu_rect, + pub scanout_id: u32, + pub resource_id: u32, + pub width: u32, + pub height: u32, + pub format: u32, + pub padding: u32, + pub strides: [u32; 4], + pub offsets: [u32; 4], +} +unsafe impl ByteValued for virtio_gpu_set_scanout_blob {} + +/* simple formats for fbcon/X use */ +pub const VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM: u32 = 1; +pub const VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM: u32 = 2; +pub const VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM: u32 = 3; +pub const VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM: u32 = 4; +pub const VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM: u32 = 67; +pub const VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM: u32 = 68; +pub const VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM: u32 = 121; +pub const VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM: u32 = 134; + +/// A virtio gpu command and associated metadata specific to each command. 
+#[derive(Clone, PartialEq, Eq)] +pub enum GpuCommand { + GetDisplayInfo, + GetEdid(virtio_gpu_get_edid), + ResourceCreate2d(virtio_gpu_resource_create_2d), + ResourceUnref(virtio_gpu_resource_unref), + SetScanout(virtio_gpu_set_scanout), + SetScanoutBlob(virtio_gpu_set_scanout_blob), + ResourceFlush(virtio_gpu_resource_flush), + TransferToHost2d(virtio_gpu_transfer_to_host_2d), + ResourceAttachBacking( + virtio_gpu_resource_attach_backing, + Vec<(GuestAddress, usize)>, + ), + ResourceDetachBacking(virtio_gpu_resource_detach_backing), + GetCapsetInfo(virtio_gpu_get_capset_info), + GetCapset(virtio_gpu_get_capset), + CtxCreate(virtio_gpu_ctx_create), + CtxDestroy(virtio_gpu_ctx_destroy), + CtxAttachResource(virtio_gpu_ctx_resource), + CtxDetachResource(virtio_gpu_ctx_resource), + ResourceCreate3d(virtio_gpu_resource_create_3d), + TransferToHost3d(virtio_gpu_transfer_host_3d), + TransferFromHost3d(virtio_gpu_transfer_host_3d), + CmdSubmit3d { + cmd_data: Vec<u8>, + fence_ids: Vec<u64>, + }, + ResourceCreateBlob(virtio_gpu_resource_create_blob), + ResourceMapBlob(virtio_gpu_resource_map_blob), + ResourceUnmapBlob(virtio_gpu_resource_unmap_blob), + UpdateCursor(virtio_gpu_update_cursor), + MoveCursor(virtio_gpu_update_cursor), + ResourceAssignUuid(virtio_gpu_resource_assign_uuid), +} + +/// An error indicating something went wrong decoding a `GpuCommand`. These correspond to +/// `VIRTIO_GPU_CMD_*`. +#[derive(Error, Debug)] +pub enum GpuCommandDecodeError { + /// The type of the command was invalid. + #[error("invalid command type ({0})")] + InvalidType(u32), + /// An I/O error occurred. + #[error("an I/O error occurred: {0}")] + IO(io::Error), + #[error("Descriptor read failed")] + DescriptorReadFailed, +} + +impl From<io::Error> for GpuCommandDecodeError { + fn from(e: io::Error) -> GpuCommandDecodeError { + GpuCommandDecodeError::IO(e) + } +} + +impl From<device::Error> for GpuCommandDecodeError { + fn from(_: device::Error) -> Self { + GpuCommandDecodeError::DescriptorReadFailed + } +} + +impl From<device::Error> for GpuResponseEncodeError { + fn from(_: device::Error) -> Self { + GpuResponseEncodeError::DescriptorWriteFailed + } +}
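The memory-layout comment on `virtio_gpu_cmd_submit` above is easiest to verify by hand. The following is a minimal, self-contained sketch (not part of the patch) that builds the command-specific portion of a `VIRTIO_GPU_CMD_SUBMIT_3D` payload in exactly the order `GpuCommand::decode` below consumes it: the two `u32` header fields, then `num_in_fences` little-endian `u64` fence IDs, then `size` bytes of command data. The fence values and buffer length are made up.

```rust
// Sketch only: builds the SUBMIT_3D payload that follows virtio_gpu_ctrl_hdr.
fn build_submit_3d_payload(fence_ids: &[u64], cmd_buf: &[u8]) -> Vec<u8> {
    let mut payload = Vec::new();
    // virtio_gpu_cmd_submit { size, num_in_fences }, little-endian as on the wire
    payload.extend_from_slice(&(cmd_buf.len() as u32).to_le_bytes());
    payload.extend_from_slice(&(fence_ids.len() as u32).to_le_bytes());
    // The in-fence IDs come first, naturally 8-byte aligned in the virtio buffer.
    for id in fence_ids {
        payload.extend_from_slice(&id.to_le_bytes());
    }
    // cmd_buf is last.
    payload.extend_from_slice(cmd_buf);
    payload
}

fn main() {
    let payload = build_submit_3d_payload(&[7, 9], &[0u8; 16]);
    // 8 bytes of struct fields + 2 * 8 bytes of fence IDs + 16 bytes of commands
    assert_eq!(payload.len(), 8 + 16 + 16);
}
```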
f.debug_struct("TransferFromHost3d").finish(), + CmdSubmit3d { .. } => f.debug_struct("CmdSubmit3d").finish(), + ResourceCreateBlob(_info) => f.debug_struct("ResourceCreateBlob").finish(), + ResourceMapBlob(_info) => f.debug_struct("ResourceMapBlob").finish(), + ResourceUnmapBlob(_info) => f.debug_struct("ResourceUnmapBlob").finish(), + UpdateCursor(_info) => f.debug_struct("UpdateCursor").finish(), + MoveCursor(_info) => f.debug_struct("MoveCursor").finish(), + ResourceAssignUuid(_info) => f.debug_struct("ResourceAssignUuid").finish(), + } + } +} + +impl GpuCommand { + /// Decodes a command from the given chunk of memory. + pub fn decode( + reader: &mut Reader, + ) -> Result<(virtio_gpu_ctrl_hdr, GpuCommand), GpuCommandDecodeError> { + use self::GpuCommand::*; + let hdr = reader + .read_obj::() + .map_err(|_| Error::DescriptorReadFailed)?; + trace!("Decoding GpuCommand 0x{:0x}", hdr.type_); + let cmd = match hdr.type_ { + VIRTIO_GPU_CMD_GET_DISPLAY_INFO => GetDisplayInfo, + VIRTIO_GPU_CMD_GET_EDID => { + GetEdid(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_RESOURCE_CREATE_2D => { + ResourceCreate2d(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_RESOURCE_UNREF => { + ResourceUnref(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_SET_SCANOUT => { + SetScanout(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_SET_SCANOUT_BLOB => { + SetScanoutBlob(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_RESOURCE_FLUSH => { + ResourceFlush(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D => { + TransferToHost2d(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING => { + let info: virtio_gpu_resource_attach_backing = + reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?; + let mut entries = Vec::with_capacity(info.nr_entries as usize); + for _ in 0..info.nr_entries { + let entry: virtio_gpu_mem_entry = + reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?; + entries.push((GuestAddress(entry.addr), entry.length as usize)) + } + ResourceAttachBacking(info, entries) + } + VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING => { + ResourceDetachBacking(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_GET_CAPSET_INFO => { + GetCapsetInfo(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_GET_CAPSET => { + GetCapset(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_CTX_CREATE => { + CtxCreate(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_CTX_DESTROY => { + CtxDestroy(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE => { + CtxAttachResource(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE => { + CtxDetachResource(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_RESOURCE_CREATE_3D => { + ResourceCreate3d(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D => { + TransferToHost3d(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D => { + TransferFromHost3d(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) 
+ } + VIRTIO_GPU_CMD_SUBMIT_3D => { + let info: virtio_gpu_cmd_submit = + reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?; + + let mut cmd_data = vec![0; info.size as usize]; + let mut fence_ids: Vec<u64> = Vec::with_capacity(info.num_in_fences as usize); + + for _ in 0..info.num_in_fences { + let fence_id = reader + .read_obj::<u64>() + .map_err(|_| Error::DescriptorReadFailed)?; + fence_ids.push(fence_id); + } + + reader + .read_exact(&mut cmd_data[..]) + .map_err(|_| Error::DescriptorReadFailed)?; + + CmdSubmit3d { + cmd_data, + fence_ids, + } + } + VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB => { + ResourceCreateBlob(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB => { + ResourceMapBlob(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB => { + ResourceUnmapBlob(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_UPDATE_CURSOR => { + UpdateCursor(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_MOVE_CURSOR => { + MoveCursor(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID => { + ResourceAssignUuid(reader.read_obj().map_err(|_| Error::DescriptorReadFailed)?) + } + _ => return Err(GpuCommandDecodeError::InvalidType(hdr.type_)), + }; + + Ok((hdr, cmd)) + } +} + +#[derive(Debug, PartialEq, Eq)] +pub struct GpuResponsePlaneInfo { + pub stride: u32, + pub offset: u32, +} + +/// A response to a `GpuCommand`. These correspond to `VIRTIO_GPU_RESP_*`. +#[derive(Debug)] +pub enum GpuResponse { + OkNoData, + OkDisplayInfo(Vec<(u32, u32, bool)>), + OkEdid { + /// The EDID display data blob (as specified by VESA) + blob: Box<[u8]>, + }, + OkCapsetInfo { + capset_id: u32, + version: u32, + size: u32, + }, + OkCapset(Vec<u8>), + OkResourcePlaneInfo { + format_modifier: u64, + plane_info: Vec<GpuResponsePlaneInfo>, + }, + OkResourceUuid { + uuid: [u8; 16], + }, + OkMapInfo { + map_info: u32, + }, + ErrUnspec, + ErrRutabaga(RutabagaError), + ErrScanout { + num_scanouts: u32, + }, + ErrOutOfMemory, + ErrInvalidScanoutId, + ErrInvalidResourceId, + ErrInvalidContextId, + ErrInvalidParameter, +} + +impl From<RutabagaError> for GpuResponse { + fn from(e: RutabagaError) -> GpuResponse { + GpuResponse::ErrRutabaga(e) + } +} + +impl Display for GpuResponse { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::GpuResponse::*; + match self { + ErrRutabaga(e) => write!(f, "renderer error: {}", e), + ErrScanout { num_scanouts } => write!(f, "non-zero scanout: {}", num_scanouts), + _ => Ok(()), + } + } +} + +/// An error indicating something went wrong encoding a `GpuResponse`. +#[derive(Error, Debug)] +pub enum GpuResponseEncodeError { + /// An I/O error occurred. + #[error("an I/O error occurred: {0}")] + IO(io::Error), + /// More displays than are valid were in a `OkDisplayInfo`. + #[error("{0} is more displays than are valid")] + TooManyDisplays(usize), + /// More planes than are valid were in a `OkResourcePlaneInfo`. + #[error("{0} is more planes than are valid")] + TooManyPlanes(usize), + #[error("Descriptor write failed")] + DescriptorWriteFailed, +} + +impl From<io::Error> for GpuResponseEncodeError { + fn from(e: io::Error) -> GpuResponseEncodeError { + GpuResponseEncodeError::IO(e) + } +} + +pub type VirtioGpuResult = std::result::Result<GpuResponse, GpuResponse>;
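As a usage illustration (not part of the patch), a control-queue handler pairs these two halves: `GpuCommand::decode` pulls the header and command off the descriptor chain, the device produces a `GpuResponse`, and `encode` (below) writes it back while echoing the fence metadata from the request header. A minimal sketch with the command dispatch stubbed out:

```rust
// Sketch only: the GetDisplayInfo arm is a placeholder; a real backend
// dispatches every GpuCommand variant instead of falling through to ErrUnspec.
fn handle_one_request(
    reader: &mut Reader,
    writer: &mut Writer,
) -> Result<u32, GpuResponseEncodeError> {
    let (hdr, cmd) = match GpuCommand::decode(reader) {
        Ok(decoded) => decoded,
        // Decoding failed: report ERR_UNSPEC with zeroed fence metadata.
        Err(_) => return GpuResponse::ErrUnspec.encode(0, 0, 0, 0, writer),
    };
    let resp = match cmd {
        GpuCommand::GetDisplayInfo => GpuResponse::OkNoData, // placeholder
        _ => GpuResponse::ErrUnspec,
    };
    // Echo flags, fence_id, ctx_id and ring_idx from the request header.
    resp.encode(hdr.flags, hdr.fence_id, hdr.ctx_id, hdr.ring_idx, writer)
}
```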
+impl GpuResponse { + /// Encodes this `GpuResponse` into `resp` using the given set of metadata. + pub fn encode( + &self, + flags: u32, + fence_id: u64, + ctx_id: u32, + ring_idx: u8, + writer: &mut Writer, + ) -> Result<u32, GpuResponseEncodeError> { + let hdr = virtio_gpu_ctrl_hdr { + type_: self.get_type(), + flags, + fence_id, + ctx_id, + ring_idx, + padding: Default::default(), + }; + let len = match *self { + GpuResponse::OkDisplayInfo(ref info) => { + if info.len() > VIRTIO_GPU_MAX_SCANOUTS { + return Err(GpuResponseEncodeError::TooManyDisplays(info.len())); + } + let mut disp_info = virtio_gpu_resp_display_info { + hdr, + pmodes: Default::default(), + }; + for (disp_mode, &(width, height, enabled)) in disp_info.pmodes.iter_mut().zip(info) + { + disp_mode.r.width = width; + disp_mode.r.height = height; + disp_mode.enabled = enabled as u32; + } + writer + .write_obj(disp_info) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of_val(&disp_info) + } + GpuResponse::OkEdid { ref blob } => { + let mut edid_info = virtio_gpu_resp_edid { + hdr, + size: blob.len() as u32, + edid: [0; EDID_BLOB_MAX_SIZE], + padding: Default::default(), + }; + edid_info.edid[..blob.len()].copy_from_slice(blob); + writer + .write_obj(edid_info) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of_val(&edid_info) + } + GpuResponse::OkCapsetInfo { + capset_id, + version, + size, + } => { + writer + .write_obj(virtio_gpu_resp_capset_info { + hdr, + capset_id, + capset_max_version: version, + capset_max_size: size, + padding: 0u32, + }) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of::<virtio_gpu_resp_capset_info>() + } + GpuResponse::OkCapset(ref data) => { + writer + .write_obj(hdr) + .map_err(|_| Error::DescriptorWriteFailed)?; + writer + .write(data) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of_val(&hdr) + data.len() + } + GpuResponse::OkResourcePlaneInfo { + format_modifier, + ref plane_info, + } => { + if plane_info.len() > PLANE_INFO_MAX_COUNT { + return Err(GpuResponseEncodeError::TooManyPlanes(plane_info.len())); + } + let mut strides = [u32::default(); PLANE_INFO_MAX_COUNT]; + let mut offsets = [u32::default(); PLANE_INFO_MAX_COUNT]; + for (plane_index, plane) in plane_info.iter().enumerate() { + strides[plane_index] = plane.stride; + offsets[plane_index] = plane.offset; + } + let plane_info = virtio_gpu_resp_resource_plane_info { + hdr, + count: plane_info.len() as u32, + padding: 0u32, + format_modifier, + strides, + offsets, + }; + if writer.available_bytes() >= size_of_val(&plane_info) { + writer + .write_obj(plane_info) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of_val(&plane_info) + } else { + // In case there is too little room in the response slice to store the + // entire virtio_gpu_resp_resource_plane_info, convert response to a regular + // VIRTIO_GPU_RESP_OK_NODATA and attempt to return that. + writer + .write_obj(virtio_gpu_ctrl_hdr { + type_: VIRTIO_GPU_RESP_OK_NODATA, + ..hdr + }) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of_val(&hdr) + } + } + GpuResponse::OkResourceUuid { uuid } => { + let resp_info = virtio_gpu_resp_resource_uuid { hdr, uuid }; + + writer + .write_obj(resp_info) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of_val(&resp_info) + } + GpuResponse::OkMapInfo { map_info } => { + let resp_info = virtio_gpu_resp_map_info { + hdr, + map_info, + padding: Default::default(), + }; + + writer + .write_obj(resp_info) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of_val(&resp_info) + } + _ => { + writer + .write_obj(hdr) + .map_err(|_| Error::DescriptorWriteFailed)?; + size_of_val(&hdr) + } + }; + Ok(len as u32) + } + + /// Gets the `VIRTIO_GPU_*` enum value that corresponds to this variant. 
+ pub fn get_type(&self) -> u32 { + match self { + GpuResponse::OkNoData => VIRTIO_GPU_RESP_OK_NODATA, + GpuResponse::OkDisplayInfo(_) => VIRTIO_GPU_RESP_OK_DISPLAY_INFO, + GpuResponse::OkEdid { .. } => VIRTIO_GPU_RESP_OK_EDID, + GpuResponse::OkCapsetInfo { .. } => VIRTIO_GPU_RESP_OK_CAPSET_INFO, + GpuResponse::OkCapset(_) => VIRTIO_GPU_RESP_OK_CAPSET, + GpuResponse::OkResourcePlaneInfo { .. } => VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO, + GpuResponse::OkResourceUuid { .. } => VIRTIO_GPU_RESP_OK_RESOURCE_UUID, + GpuResponse::OkMapInfo { .. } => VIRTIO_GPU_RESP_OK_MAP_INFO, + GpuResponse::ErrUnspec => VIRTIO_GPU_RESP_ERR_UNSPEC, + GpuResponse::ErrRutabaga(_) => VIRTIO_GPU_RESP_ERR_UNSPEC, + GpuResponse::ErrScanout { num_scanouts: _ } => VIRTIO_GPU_RESP_ERR_UNSPEC, + GpuResponse::ErrOutOfMemory => VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY, + GpuResponse::ErrInvalidScanoutId => VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID, + GpuResponse::ErrInvalidResourceId => VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID, + GpuResponse::ErrInvalidContextId => VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID, + GpuResponse::ErrInvalidParameter => VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER, + } + } +} + +#[cfg(test)] +mod tests { + + use super::*; + + #[test] + fn test_virtio_gpu_config() { + // Test VirtioGpuConfig size + assert_eq!(std::mem::size_of::<VirtioGpuConfig>(), 16); + } + + #[test] + fn test_invalid_command_type_display() { + let error = InvalidCommandType(42); + assert_eq!(format!("{}", error), "Invalid command type 42"); + } + + #[test] + fn test_gpu_response_display() { + let err_rutabaga = GpuResponse::ErrRutabaga(RutabagaError::InvalidContextId); + assert_eq!( + format!("{}", err_rutabaga), + "renderer error: invalid context id" + ); + + let err_scanout = GpuResponse::ErrScanout { num_scanouts: 3 }; + assert_eq!(format!("{}", err_scanout), "non-zero scanout: 3"); + } + + #[test] + fn test_invalid_type_error() { + let error = GpuCommandDecodeError::InvalidType(42); + assert_eq!(format!("{}", error), "invalid command type (42)"); + } + + // Test io_error conversion to gpu command decode error + #[test] + fn test_io_error() { + let io_error = io::Error::new(io::ErrorKind::Other, "Test IO error"); + let gpu_error: GpuCommandDecodeError = io_error.into(); + match gpu_error { + GpuCommandDecodeError::IO(_) => (), + _ => panic!("Expected IO error"), + } + } + + // Test vhu_error conversion to gpu command decode/encode error + #[test] + fn test_device_error() { + let device_error = device::Error::DescriptorReadFailed; + let gpu_error: GpuCommandDecodeError = device_error.into(); + match gpu_error { + GpuCommandDecodeError::DescriptorReadFailed => (), + _ => panic!("Expected DescriptorReadFailed error"), + } + let device_error = device::Error::DescriptorWriteFailed; + let gpu_error: GpuResponseEncodeError = device_error.into(); + match gpu_error { + GpuResponseEncodeError::DescriptorWriteFailed => (), + _ => panic!("Expected DescriptorWriteFailed error"), + } + } + + #[test] + fn test_debug() { + let get_display_info = GpuCommand::GetDisplayInfo; + let get_edid = GpuCommand::GetEdid(virtio_gpu_get_edid::default()); + let resource_create_2d = + GpuCommand::ResourceCreate2d(virtio_gpu_resource_create_2d::default()); + let resource_unref = GpuCommand::ResourceUnref(virtio_gpu_resource_unref::default()); + let set_scanout = GpuCommand::SetScanout(virtio_gpu_set_scanout::default()); + let set_scanout_blob = GpuCommand::SetScanoutBlob(virtio_gpu_set_scanout_blob::default()); + let resource_flush = GpuCommand::ResourceFlush(virtio_gpu_resource_flush::default()); 
+ let transfer_to_host_2d = + GpuCommand::TransferToHost2d(virtio_gpu_transfer_to_host_2d::default()); + //let resource_attach_backing = GpuCommand::ResourceAttachBacking(virtio_gpu_resource_attach_backing::default(), vec![1]); + let resource_detach_backing = + GpuCommand::ResourceDetachBacking(virtio_gpu_resource_detach_backing::default()); + let get_capset_info = GpuCommand::GetCapsetInfo(virtio_gpu_get_capset_info::default()); + let get_capset = GpuCommand::GetCapset(virtio_gpu_get_capset::default()); + let ctx_create = GpuCommand::CtxCreate(virtio_gpu_ctx_create::default()); + let ctx_destroy = GpuCommand::CtxDestroy(virtio_gpu_ctx_destroy::default()); + let ctx_attach_resource = GpuCommand::CtxAttachResource(virtio_gpu_ctx_resource::default()); + let ctx_detach_resource = GpuCommand::CtxDetachResource(virtio_gpu_ctx_resource::default()); + let resource_create_3d = + GpuCommand::ResourceCreate3d(virtio_gpu_resource_create_3d::default()); + let transfer_to_host_3d = + GpuCommand::TransferToHost3d(virtio_gpu_transfer_host_3d::default()); + let transfer_from_host_3d = + GpuCommand::TransferFromHost3d(virtio_gpu_transfer_host_3d::default()); + let cmd_submit_3d = GpuCommand::CmdSubmit3d { + cmd_data: Vec::new(), + fence_ids: Vec::new(), + }; + let resource_create_blob = + GpuCommand::ResourceCreateBlob(virtio_gpu_resource_create_blob::default()); + let resource_map_blob = + GpuCommand::ResourceMapBlob(virtio_gpu_resource_map_blob::default()); + let resource_unmap_blob = + GpuCommand::ResourceUnmapBlob(virtio_gpu_resource_unmap_blob::default()); + let update_cursor = GpuCommand::UpdateCursor(virtio_gpu_update_cursor::default()); + let move_cursor = GpuCommand::MoveCursor(virtio_gpu_update_cursor::default()); + let resource_assign_uuid = + GpuCommand::ResourceAssignUuid(virtio_gpu_resource_assign_uuid::default()); + + let expected_debug_output_display = "GetDisplayInfo"; + let expected_debug_output_edid = "GetEdid"; + let expected_debug_output_create2d = "ResourceCreate2d"; + let expected_debug_output_unref = "ResourceUnref"; + let expected_debug_output_scanout = "SetScanout"; + let expected_debug_output_scanout_blob = "SetScanoutBlob"; + let expected_debug_output_flush = "ResourceFlush"; + let expected_debug_output_transfer_to_host_2d = "TransferToHost2d"; + let expected_debug_output_detach_backing = "ResourceDetachBacking"; + let expected_debug_output_get_capset_info = "GetCapsetInfo"; + let expected_debug_output_get_capset = "GetCapset"; + let expected_debug_output_ctx_create = "CtxCreate"; + let expected_debug_output_ctx_destroy = "CtxDestroy"; + let expected_debug_output_ctx_attach_resource = "CtxAttachResource"; + let expected_debug_output_ctx_detach_resource = "CtxDetachResource"; + let expected_debug_output_resource_create_3d = "ResourceCreate3d"; + let expected_debug_output_transfer_to_host_3d = "TransferToHost3d"; + let expected_debug_output_transfer_from_host_3d = "TransferFromHost3d"; + let expected_debug_output_cmd_submit_3d = "CmdSubmit3d"; + let expected_debug_output_create_blob = "ResourceCreateBlob"; + let expected_debug_output_map_blob = "ResourceMapBlob"; + let expected_debug_output_unmap_blob = "ResourceUnmapBlob"; + let expected_debug_output_update_cursor = "UpdateCursor"; + let expected_debug_output_move_cursor = "MoveCursor"; + let expected_debug_output_assign_uuid = "ResourceAssignUuid"; + + assert_eq!( + format!("{:?}", get_display_info), + expected_debug_output_display + ); + assert_eq!(format!("{:?}", get_edid), expected_debug_output_edid); + assert_eq!( + 
format!("{:?}", resource_create_2d), + expected_debug_output_create2d + ); + assert_eq!(format!("{:?}", resource_unref), expected_debug_output_unref); + assert_eq!(format!("{:?}", set_scanout), expected_debug_output_scanout); + assert_eq!( + format!("{:?}", set_scanout_blob), + expected_debug_output_scanout_blob + ); + assert_eq!(format!("{:?}", resource_flush), expected_debug_output_flush); + assert_eq!( + format!("{:?}", transfer_to_host_2d), + expected_debug_output_transfer_to_host_2d + ); + assert_eq!( + format!("{:?}", resource_detach_backing), + expected_debug_output_detach_backing + ); + assert_eq!( + format!("{:?}", get_capset_info), + expected_debug_output_get_capset_info + ); + assert_eq!( + format!("{:?}", get_capset), + expected_debug_output_get_capset + ); + assert_eq!( + format!("{:?}", ctx_create), + expected_debug_output_ctx_create + ); + assert_eq!( + format!("{:?}", ctx_destroy), + expected_debug_output_ctx_destroy + ); + assert_eq!( + format!("{:?}", ctx_attach_resource), + expected_debug_output_ctx_attach_resource + ); + assert_eq!( + format!("{:?}", ctx_detach_resource), + expected_debug_output_ctx_detach_resource + ); + assert_eq!( + format!("{:?}", resource_create_3d), + expected_debug_output_resource_create_3d + ); + assert_eq!( + format!("{:?}", transfer_to_host_3d), + expected_debug_output_transfer_to_host_3d + ); + assert_eq!( + format!("{:?}", transfer_from_host_3d), + expected_debug_output_transfer_from_host_3d + ); + assert_eq!( + format!("{:?}", cmd_submit_3d), + expected_debug_output_cmd_submit_3d + ); + assert_eq!( + format!("{:?}", resource_create_blob), + expected_debug_output_create_blob + ); + assert_eq!( + format!("{:?}", resource_map_blob), + expected_debug_output_map_blob + ); + assert_eq!( + format!("{:?}", resource_unmap_blob), + expected_debug_output_unmap_blob + ); + assert_eq!( + format!("{:?}", update_cursor), + expected_debug_output_update_cursor + ); + assert_eq!( + format!("{:?}", move_cursor), + expected_debug_output_move_cursor + ); + assert_eq!( + format!("{:?}", resource_assign_uuid), + expected_debug_output_assign_uuid + ); + + let bytes = b"test_debug\0"; + let original = virtio_gpu_ctx_create { + debug_name: { + let mut debug_name = [0; 64]; + debug_name[..bytes.len()].copy_from_slice(bytes); + debug_name + }, + context_init: 0, + nlen: bytes.len() as u32, + }; + + let debug_string = format!("{:?}", original); + + assert_eq!( + debug_string, + "\"virtio_gpu_ctx_create\" { debug_name: \"test_debug\", context_init: 0 }" + ); + } +} diff --git a/staging/vhost-device-gpu/src/virtio_gpu.rs b/staging/vhost-device-gpu/src/virtio_gpu.rs new file mode 100644 index 00000000..7c569011 --- /dev/null +++ b/staging/vhost-device-gpu/src/virtio_gpu.rs @@ -0,0 +1,964 @@ +// Copyright 2024 Red Hat Inc +// Copyright 2019 The ChromiumOS Authors +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +use log::{debug, error, trace}; +use std::{ + collections::BTreeMap, + io::IoSliceMut, + os::fd::FromRawFd, + result::Result, + sync::{Arc, Mutex}, +}; + +use libc::c_void; +use rutabaga_gfx::{ + ResourceCreate3D, ResourceCreateBlob, Rutabaga, RutabagaBuilder, RutabagaComponentType, + RutabagaFence, RutabagaFenceHandler, RutabagaIntoRawDescriptor, RutabagaIovec, Transfer3D, +}; +use vhost::vhost_user::{ + gpu_message::{ + VhostUserGpuCursorPos, VhostUserGpuCursorUpdate, VhostUserGpuEdidRequest, + VhostUserGpuScanout, VhostUserGpuUpdate, VirtioGpuRespDisplayInfo, + }, + GpuBackend, +}; +use vhost_user_backend::{VringRwLock, VringT}; +use 
vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, VolatileSlice}; +use vmm_sys_util::eventfd::EventFd; + +use crate::protocol::{ + virtio_gpu_rect, GpuResponse, GpuResponse::*, GpuResponsePlaneInfo, VirtioGpuResult, + VIRTIO_GPU_FLAG_INFO_RING_IDX, VIRTIO_GPU_MAX_SCANOUTS, +}; +use crate::{device::Error, GpuMode}; + +fn sglist_to_rutabaga_iovecs( + vecs: &[(GuestAddress, usize)], + mem: &GuestMemoryMmap, +) -> Result<Vec<RutabagaIovec>, ()> { + if vecs + .iter() + .any(|&(addr, len)| mem.get_slice(addr, len).is_err()) + { + return Err(()); + } + + let mut rutabaga_iovecs: Vec<RutabagaIovec> = Vec::new(); + for &(addr, len) in vecs { + let slice = mem.get_slice(addr, len).unwrap(); + rutabaga_iovecs.push(RutabagaIovec { + base: slice.ptr_guard_mut().as_ptr() as *mut c_void, + len, + }); + } + Ok(rutabaga_iovecs) +} + +#[derive(Default, Debug)] +pub struct Rectangle { + pub x: u32, + pub y: u32, + pub width: u32, + pub height: u32, +} + +impl From<virtio_gpu_rect> for Rectangle { + fn from(r: virtio_gpu_rect) -> Self { + Self { + x: r.x, + y: r.y, + width: r.width, + height: r.height, + } + } +} + +pub trait VirtioGpu { + /// Uses the hypervisor to unmap the blob resource. + fn resource_unmap_blob( + &mut self, + resource_id: u32, + shm_region: &VirtioShmRegion, + ) -> VirtioGpuResult; + + /// Uses the hypervisor to map the rutabaga blob resource. + /// + /// When sandboxing is disabled, external_blob is unset and opaque fds are mapped by + /// rutabaga as ExternalMapping. + /// When sandboxing is enabled, external_blob is set and opaque fds must be mapped in the + /// hypervisor process by Vulkano using metadata provided by Rutabaga::vulkan_info(). + fn resource_map_blob( + &mut self, + resource_id: u32, + shm_region: &VirtioShmRegion, + offset: u64, + ) -> VirtioGpuResult; + + /// Creates a blob resource using rutabaga. + fn resource_create_blob( + &mut self, + ctx_id: u32, + resource_id: u32, + resource_create_blob: ResourceCreateBlob, + vecs: Vec<(GuestAddress, usize)>, + mem: &GuestMemoryMmap, + ) -> VirtioGpuResult; + + fn process_fence( + &mut self, + ring: VirtioGpuRing, + fence_id: u64, + desc_index: u16, + len: u32, + ) -> bool; + + /// Creates a fence with the RutabagaFence that can be used to determine when the previous + /// command completed. + fn create_fence(&mut self, rutabaga_fence: RutabagaFence) -> VirtioGpuResult; + + /// Submits a command buffer to a rutabaga context. + fn submit_command( + &mut self, + ctx_id: u32, + commands: &mut [u8], + fence_ids: &[u64], + ) -> VirtioGpuResult; + + /// Detaches a resource from a rutabaga context. + fn context_detach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult; + + /// Attaches a resource to a rutabaga context. + fn context_attach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult; + + /// Destroys a rutabaga context. + fn destroy_context(&mut self, ctx_id: u32) -> VirtioGpuResult; + fn force_ctx_0(&self); + + /// Gets the list of supported display resolutions as a slice of `(width, height, enabled)` tuples. + fn display_info(&self, display_info: VirtioGpuRespDisplayInfo) -> Vec<(u32, u32, bool)>; + + /// Gets the EDID for the specified scanout ID. If that scanout is not enabled, it would return + /// the EDID of a default display. + fn get_edid( + &self, + gpu_backend: &mut GpuBackend, + edid_req: VhostUserGpuEdidRequest, + ) -> VirtioGpuResult; + + /// Sets the given resource id as the source of scanout to the display. 
+ fn set_scanout( + &mut self, + gpu_backend: &mut GpuBackend, + scanout_id: u32, + resource_id: u32, + rect: Rectangle, + ) -> VirtioGpuResult; + + /// Creates a 3D resource with the given properties and resource_id. + fn resource_create_3d( + &mut self, + resource_id: u32, + resource_create_3d: ResourceCreate3D, + ) -> VirtioGpuResult; + + /// Releases guest kernel reference on the resource. + fn unref_resource(&mut self, resource_id: u32) -> VirtioGpuResult; + + /// If the resource is the scanout resource, flush it to the display. + fn flush_resource( + &mut self, + resource_id: u32, + gpu_backend: &mut GpuBackend, + rect: Rectangle, + ) -> VirtioGpuResult; + + /// Copies data to host resource from the attached iovecs. Can also be used to flush caches. + fn transfer_write( + &mut self, + ctx_id: u32, + resource_id: u32, + transfer: Transfer3D, + ) -> VirtioGpuResult; + + /// Copies data from the host resource to: + /// 1) To the optional volatile slice + /// 2) To the host resource's attached iovecs + /// + /// Can also be used to invalidate caches. + fn transfer_read( + &mut self, + ctx_id: u32, + resource_id: u32, + transfer: Transfer3D, + buf: Option<VolatileSlice>, + ) -> VirtioGpuResult; + + /// Attaches backing memory to the given resource, represented by a `Vec` of `(address, size)` + /// tuples in the guest's physical address space. Converts to RutabagaIovec from the memory + /// mapping. + fn attach_backing( + &mut self, + resource_id: u32, + mem: &GuestMemoryMmap, + vecs: Vec<(GuestAddress, usize)>, + ) -> VirtioGpuResult; + + /// Detaches any previously attached iovecs from the resource. + fn detach_backing(&mut self, resource_id: u32) -> VirtioGpuResult; + + /// Updates the cursor's memory to the given resource_id, and sets its position to the given + /// coordinates. + fn update_cursor( + &mut self, + resource_id: u32, + gpu_backend: &mut GpuBackend, + cursor_pos: VhostUserGpuCursorPos, + hot_x: u32, + hot_y: u32, + ) -> VirtioGpuResult; + + /// Moves the cursor's position to the given coordinates. + fn move_cursor( + &mut self, + resource_id: u32, + gpu_backend: &mut GpuBackend, + cursor: VhostUserGpuCursorPos, + ) -> VirtioGpuResult; + + /// Returns a uuid for the resource. + fn resource_assign_uuid(&self, resource_id: u32) -> VirtioGpuResult; + + /// Gets rutabaga's capset information associated with `index`. + fn get_capset_info(&self, index: u32) -> VirtioGpuResult; + + /// Gets a capset from rutabaga. + fn get_capset(&self, capset_id: u32, version: u32) -> VirtioGpuResult; + + /// Creates a rutabaga context. + fn create_context( + &mut self, + ctx_id: u32, + context_init: u32, + context_name: Option<&str>, + ) -> VirtioGpuResult; + + /// Get an EventFd descriptor, that signals when to call event_poll. + fn get_event_poll_fd(&self) -> Option<EventFd>; + + /// Polls the Rutabaga backend. + fn event_poll(&self); +}
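For orientation, an illustration that is not part of the patch: the intended wiring is that the backend registers the `EventFd` returned by `get_event_poll_fd()` with its epoll worker and calls `event_poll()` each time it signals, so rutabaga can dispatch completed work. A minimal sketch of such a consumer:

```rust
// Sketch only: a real backend registers the fd with its epoll loop instead of
// reading it inline.
fn drain_gpu_events<G: VirtioGpu>(gpu: &G) {
    if let Some(poll_fd) = gpu.get_event_poll_fd() {
        // Consume one readiness notification, then let rutabaga process events.
        let _ = poll_fd.read();
        gpu.event_poll();
    }
}
```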
+#[derive(Clone, Default)] +pub struct VirtioShmRegion { + pub host_addr: u64, + pub guest_addr: u64, + pub size: usize, +} + +#[derive(PartialEq, Eq, PartialOrd, Ord)] +pub enum VirtioGpuRing { + Global, + ContextSpecific { ctx_id: u32, ring_idx: u8 }, +} + +struct FenceDescriptor { + ring: VirtioGpuRing, + fence_id: u64, + desc_index: u16, + len: u32, +} + +#[derive(Default)] +pub struct FenceState { + descs: Vec<FenceDescriptor>, + completed_fences: BTreeMap<VirtioGpuRing, u64>, +} + +#[derive(Copy, Clone, Debug, Default)] +struct AssociatedScanouts(u32); + +impl AssociatedScanouts { + fn enable(&mut self, scanout_id: u32) { + self.0 |= 1 << scanout_id; + } + + fn disable(&mut self, scanout_id: u32) { + self.0 ^= 1 << scanout_id; + } + + fn iter_enabled(self) -> impl Iterator<Item = u32> { + (0..VIRTIO_GPU_MAX_SCANOUTS) + .filter(move |i| ((self.0 >> i) & 1) == 1) + .map(|n| n as u32) + } +}
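A small illustration of the `AssociatedScanouts` bitmask (a sketch, not part of the patch): bit *i* records whether the resource is currently the source of scanout *i*, so a mirrored resource simply has several bits set, and `iter_enabled` yields the IDs of those scanouts.

```rust
// Sketch only: exercising the bitmask in-module (the type is private).
fn associated_scanouts_demo() {
    let mut scanouts = AssociatedScanouts::default();
    scanouts.enable(0);
    scanouts.enable(3); // one resource mirrored on scanouts 0 and 3
    assert_eq!(scanouts.iter_enabled().collect::<Vec<u32>>(), vec![0, 3]);
    scanouts.disable(0);
    assert_eq!(scanouts.iter_enabled().collect::<Vec<u32>>(), vec![3]);
}
```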
+#[derive(Default, Copy, Clone)] +pub struct VirtioGpuResource { + id: u32, + width: u32, + height: u32, + /// Stores information about which scanouts are associated with the given resource. + /// Resource could be used for multiple scanouts (the displays are mirrored). + scanouts: AssociatedScanouts, +} + +impl VirtioGpuResource { + fn calculate_size(&self) -> Result<usize, &'static str> { + let width = self.width as usize; + let height = self.height as usize; + let size = width + .checked_mul(height) + .ok_or("Multiplication of width and height overflowed")? + .checked_mul(READ_RESOURCE_BYTES_PER_PIXEL) + .ok_or("Multiplication of result and bytes_per_pixel overflowed")?; + + Ok(size) + } +} + +impl VirtioGpuResource { + /// Creates a new VirtioGpuResource with 2D/3D metadata + pub fn new(resource_id: u32, width: u32, height: u32) -> VirtioGpuResource { + VirtioGpuResource { + id: resource_id, + width, + height, + scanouts: Default::default(), + } + } +} + +pub struct VirtioGpuScanout { + resource_id: u32, +} + +pub struct RutabagaVirtioGpu { + pub(crate) rutabaga: Rutabaga, + pub(crate) resources: BTreeMap<u32, VirtioGpuResource>, + pub(crate) fence_state: Arc<Mutex<FenceState>>, + pub(crate) scanouts: [Option<VirtioGpuScanout>; VIRTIO_GPU_MAX_SCANOUTS], +} + +const READ_RESOURCE_BYTES_PER_PIXEL: usize = 4; + +impl RutabagaVirtioGpu { + // TODO: this depends on Rutabaga builder, so this will need to be handled at runtime eventually + pub const MAX_NUMBER_OF_CAPSETS: u32 = 3; + + fn create_fence_handler( + queue_ctl: VringRwLock, + fence_state: Arc<Mutex<FenceState>>, + ) -> RutabagaFenceHandler { + RutabagaFenceHandler::new(move |completed_fence: RutabagaFence| { + debug!( + "XXX - fence called: id={}, ring_idx={}", + completed_fence.fence_id, completed_fence.ring_idx + ); + + let mut fence_state = fence_state.lock().unwrap(); + let mut i = 0; + + let ring = match completed_fence.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX { + 0 => VirtioGpuRing::Global, + _ => VirtioGpuRing::ContextSpecific { + ctx_id: completed_fence.ctx_id, + ring_idx: completed_fence.ring_idx, + }, + }; + + while i < fence_state.descs.len() { + debug!("XXX - fence_id: {}", fence_state.descs[i].fence_id); + if fence_state.descs[i].ring == ring + && fence_state.descs[i].fence_id <= completed_fence.fence_id + { + let completed_desc = fence_state.descs.remove(i); + debug!( + "XXX - found fence: desc_index={}", + completed_desc.desc_index + ); + + queue_ctl + .add_used(completed_desc.desc_index, completed_desc.len) + .unwrap(); + + queue_ctl + .signal_used_queue() + .map_err(Error::NotificationFailed) + .unwrap(); + debug!("Notification sent"); + } else { + i += 1; + } + } + // Update the last completed fence for this context + fence_state + .completed_fences + .insert(ring, completed_fence.fence_id); + }) + } + + pub fn new(queue_ctl: &VringRwLock, renderer: GpuMode) -> Self { + let component = match renderer { + GpuMode::ModeVirglRenderer => RutabagaComponentType::VirglRenderer, + GpuMode::ModeGfxstream => RutabagaComponentType::Gfxstream, + }; + let builder = RutabagaBuilder::new(component, 0) + .set_use_egl(true) + .set_use_gles(true) + .set_use_glx(true) + .set_use_surfaceless(true) + .set_use_external_blob(true); + + let fence_state = Arc::new(Mutex::new(Default::default())); + let fence = Self::create_fence_handler(queue_ctl.clone(), fence_state.clone()); + let rutabaga = builder + .build(fence, None) + .expect("Rutabaga initialization failed!"); + + Self { + rutabaga, + resources: Default::default(), + fence_state, + scanouts: Default::default(), + } + } + + fn result_from_query(&mut self, resource_id: u32) -> GpuResponse { + let Ok(query) = self.rutabaga.query(resource_id) else { + return OkNoData; + }; + let mut plane_info = Vec::with_capacity(4); + for plane_index in 0..4 { + plane_info.push(GpuResponsePlaneInfo { + stride: query.strides[plane_index], + offset: query.offsets[plane_index], + }); + } + let format_modifier = query.modifier; + OkResourcePlaneInfo { + format_modifier, + plane_info, + } + } + + fn read_2d_resource( + &mut self, + resource: VirtioGpuResource, + output: &mut [u8], + ) -> Result<(), String> { + let minimal_buffer_size = resource.calculate_size()?; + assert!(output.len() >= minimal_buffer_size); + + let transfer = Transfer3D { + x: 0, + y: 0, + z: 0, + w: resource.width, + h: resource.height, + d: 1, + level: 0, + stride: resource.width * READ_RESOURCE_BYTES_PER_PIXEL as u32, + layer_stride: 0, + offset: 0, + }; + + // ctx_id 0 seems to be special, crosvm uses it for this purpose too + self.rutabaga + .transfer_read(0, resource.id, transfer, Some(IoSliceMut::new(output))) + .map_err(|e| format!("{e}"))?; + + Ok(()) + } +} + +impl VirtioGpu for RutabagaVirtioGpu { + fn force_ctx_0(&self) { + self.rutabaga.force_ctx_0() + } + + fn display_info(&self, display_info: VirtioGpuRespDisplayInfo) -> Vec<(u32, u32, bool)> { + display_info + .pmodes + .iter() + .map(|display| (display.r.width, display.r.height, display.enabled == 1)) + .collect::<Vec<_>>() + } + + fn get_edid( + &self, + gpu_backend: &mut GpuBackend, + edid_req: VhostUserGpuEdidRequest, + ) -> VirtioGpuResult { + debug!("edid request: {edid_req:?}"); + let edid = gpu_backend.get_edid(&edid_req).map_err(|e| { + error!("Failed to get edid from frontend: {}", e); + ErrUnspec + })?; + + Ok(OkEdid { + blob: Box::from(&edid.edid[..edid.size as usize]), + }) + } + + fn set_scanout( + &mut self, + gpu_backend: &mut GpuBackend, + scanout_id: u32, + resource_id: u32, + rect: Rectangle, + ) -> VirtioGpuResult { + let scanout = self + .scanouts + .get_mut(scanout_id as usize) + .ok_or(ErrInvalidScanoutId)?; + + // If a resource is already associated with this scanout, make sure to disable this scanout for that resource + if let Some(resource_id) = scanout.as_ref().map(|scanout| scanout.resource_id) { + let resource = self + .resources + .get_mut(&resource_id) + .ok_or(ErrInvalidResourceId)?; + + resource.scanouts.disable(scanout_id); + } + + // Virtio spec: "The driver can use resource_id = 0 to disable a scanout." 
+ if resource_id == 0 { + *scanout = None; + debug!("Disabling scanout scanout_id={scanout_id}"); + gpu_backend + .set_scanout(&VhostUserGpuScanout { + scanout_id, + width: 0, + height: 0, + }) + .map_err(|e| { + error!("Failed to set_scanout: {e:?}"); + ErrUnspec + })?; + return Ok(OkNoData); + } + + debug!("Enabling scanout scanout_id={scanout_id}, resource_id={resource_id}: {rect:?}"); + + // QEMU doesn't like (it lags) when we call set_scanout while the scanout is enabled + if scanout.is_none() { + gpu_backend + .set_scanout(&VhostUserGpuScanout { + scanout_id, + width: rect.width, + height: rect.height, + }) + .map_err(|e| { + error!("Failed to set_scanout: {e:?}"); + ErrUnspec + })?; + } + + let resource = self + .resources + .get_mut(&resource_id) + .ok_or(ErrInvalidResourceId)?; + + resource.scanouts.enable(scanout_id); + *scanout = Some(VirtioGpuScanout { resource_id }); + Ok(OkNoData) + } + + fn resource_create_3d( + &mut self, + resource_id: u32, + resource_create_3d: ResourceCreate3D, + ) -> VirtioGpuResult { + self.rutabaga + .resource_create_3d(resource_id, resource_create_3d)?; + + let resource = VirtioGpuResource::new( + resource_id, + resource_create_3d.width, + resource_create_3d.height, + ); + + debug_assert!( + !self.resources.contains_key(&resource_id), + "Resource ID {} already exists in the resources map.", + resource_id + ); + + // Rely on rutabaga to check for duplicate resource ids. + self.resources.insert(resource_id, resource); + Ok(self.result_from_query(resource_id)) + } + + fn unref_resource(&mut self, resource_id: u32) -> VirtioGpuResult { + self.rutabaga.unref_resource(resource_id)?; + Ok(OkNoData) + } + + /// If the resource is the scanout resource, flush it to the display. + fn flush_resource( + &mut self, + resource_id: u32, + gpu_backend: &mut GpuBackend, + _rect: Rectangle, + ) -> VirtioGpuResult { + if resource_id == 0 { + return Ok(OkNoData); + } + + let resource = *self + .resources + .get(&resource_id) + .ok_or(ErrInvalidResourceId)?; + + for scanout_id in resource.scanouts.iter_enabled() { + let resource_size = resource.calculate_size().map_err(|e| { + error!( + "Resource {id} size calculation failed: {e}", + id = resource.id + ); + ErrUnspec + })?; + + let mut data = vec![0; resource_size]; + + // Gfxstream doesn't support transfer_read for portion of the resource. So we always + // read the whole resource, even if the guest specified to flush only a portion of it. + // + // The function stream_renderer_transfer_read_iov seems to ignore the stride and + // transfer_box parameters and expects the provided buffer to fit the whole resource. + if let Err(e) = self.read_2d_resource(resource, &mut data) { + log::error!("Failed to read resource {resource_id} for scanout {scanout_id}: {e}"); + continue; + } + + gpu_backend + .update_scanout( + &VhostUserGpuUpdate { + scanout_id, + x: 0, + y: 0, + width: resource.width, + height: resource.height, + }, + &data, + ) + .map_err(|e| { + error!("Failed to update_scanout: {e:?}"); + ErrUnspec + })? 
+ } + + Ok(OkNoData) + } + + fn transfer_write( + &mut self, + ctx_id: u32, + resource_id: u32, + transfer: Transfer3D, + ) -> VirtioGpuResult { + trace!("transfer_write ctx_id {ctx_id}, resource_id {resource_id}, {transfer:?}"); + + self.rutabaga + .transfer_write(ctx_id, resource_id, transfer)?; + Ok(OkNoData) + } + + fn transfer_read( + &mut self, + ctx_id: u32, + resource_id: u32, + transfer: Transfer3D, + buf: Option<VolatileSlice>, + ) -> VirtioGpuResult { + let buf = buf.map(|vs| { + IoSliceMut::new( + // SAFETY: trivially safe + unsafe { std::slice::from_raw_parts_mut(vs.ptr_guard_mut().as_ptr(), vs.len()) }, + ) + }); + self.rutabaga + .transfer_read(ctx_id, resource_id, transfer, buf)?; + Ok(OkNoData) + } + + fn attach_backing( + &mut self, + resource_id: u32, + mem: &GuestMemoryMmap, + vecs: Vec<(GuestAddress, usize)>, + ) -> VirtioGpuResult { + let rutabaga_iovecs = sglist_to_rutabaga_iovecs(&vecs[..], mem).map_err(|_| ErrUnspec)?; + self.rutabaga.attach_backing(resource_id, rutabaga_iovecs)?; + Ok(OkNoData) + } + + fn detach_backing(&mut self, resource_id: u32) -> VirtioGpuResult { + self.rutabaga.detach_backing(resource_id)?; + Ok(OkNoData) + } + + fn update_cursor( + &mut self, + resource_id: u32, + gpu_backend: &mut GpuBackend, + cursor_pos: VhostUserGpuCursorPos, + hot_x: u32, + hot_y: u32, + ) -> VirtioGpuResult { + const CURSOR_WIDTH: u32 = 64; + const CURSOR_HEIGHT: u32 = 64; + + let mut data = Box::new( + [0; READ_RESOURCE_BYTES_PER_PIXEL * CURSOR_WIDTH as usize * CURSOR_HEIGHT as usize], + ); + + let cursor_resource = self + .resources + .get(&resource_id) + .ok_or(ErrInvalidResourceId)?; + + if cursor_resource.width != CURSOR_WIDTH || cursor_resource.height != CURSOR_HEIGHT { + error!("Cursor resource has invalid dimensions"); + return Err(ErrInvalidParameter); + } + + self.read_2d_resource(*cursor_resource, &mut data[..]) + .map_err(|e| { + error!("Failed to read resource of cursor: {e}"); + ErrUnspec + })?; + + let cursor_update = VhostUserGpuCursorUpdate { + pos: cursor_pos, + hot_x, + hot_y, + }; + + gpu_backend + .cursor_update(&cursor_update, &data) + .map_err(|e| { + error!("Failed to update cursor pos from frontend: {}", e); + ErrUnspec + })?; + + Ok(OkNoData) + } + + fn move_cursor( + &mut self, + resource_id: u32, + gpu_backend: &mut GpuBackend, + cursor: VhostUserGpuCursorPos, + ) -> VirtioGpuResult { + if resource_id == 0 { + gpu_backend.cursor_pos_hide(&cursor).map_err(|e| { + error!("Failed to set cursor pos from frontend: {}", e); + ErrUnspec + })?; + } else { + gpu_backend.cursor_pos(&cursor).map_err(|e| { + error!("Failed to set cursor pos from frontend: {}", e); + ErrUnspec + })?; + } + + Ok(OkNoData) + } + + fn resource_assign_uuid(&self, resource_id: u32) -> VirtioGpuResult { + if !self.resources.contains_key(&resource_id) { + return Err(ErrInvalidResourceId); + } + + // TODO(stevensd): use real uuids once the virtio wayland protocol is updated to + // handle more than 32 bits. For now, the virtwl driver knows that the uuid is + // actually just the resource id. 
+        let mut uuid: [u8; 16] = [0; 16];
+        for (idx, byte) in resource_id.to_be_bytes().iter().enumerate() {
+            uuid[12 + idx] = *byte;
+        }
+        Ok(OkResourceUuid { uuid })
+    }
+
+    fn get_capset_info(&self, index: u32) -> VirtioGpuResult {
+        let (capset_id, version, size) = self.rutabaga.get_capset_info(index)?;
+        Ok(OkCapsetInfo {
+            capset_id,
+            version,
+            size,
+        })
+    }
+
+    fn get_capset(&self, capset_id: u32, version: u32) -> VirtioGpuResult {
+        let capset = self.rutabaga.get_capset(capset_id, version)?;
+        Ok(OkCapset(capset))
+    }
+
+    fn create_context(
+        &mut self,
+        ctx_id: u32,
+        context_init: u32,
+        context_name: Option<&str>,
+    ) -> VirtioGpuResult {
+        self.rutabaga
+            .create_context(ctx_id, context_init, context_name)?;
+        Ok(OkNoData)
+    }
+
+    fn destroy_context(&mut self, ctx_id: u32) -> VirtioGpuResult {
+        self.rutabaga.destroy_context(ctx_id)?;
+        Ok(OkNoData)
+    }
+
+    fn context_attach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult {
+        self.rutabaga.context_attach_resource(ctx_id, resource_id)?;
+        Ok(OkNoData)
+    }
+
+    fn context_detach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult {
+        self.rutabaga.context_detach_resource(ctx_id, resource_id)?;
+        Ok(OkNoData)
+    }
+
+    fn submit_command(
+        &mut self,
+        ctx_id: u32,
+        commands: &mut [u8],
+        fence_ids: &[u64],
+    ) -> VirtioGpuResult {
+        self.rutabaga.submit_command(ctx_id, commands, fence_ids)?;
+        Ok(OkNoData)
+    }
+
+    fn create_fence(&mut self, rutabaga_fence: RutabagaFence) -> VirtioGpuResult {
+        self.rutabaga.create_fence(rutabaga_fence)?;
+        Ok(OkNoData)
+    }
+
+    fn process_fence(
+        &mut self,
+        ring: VirtioGpuRing,
+        fence_id: u64,
+        desc_index: u16,
+        len: u32,
+    ) -> bool {
+        // If the fence was signaled immediately after creation (fence_id is not above
+        // the ring's completed counter), don't queue a FenceDescriptor; returning true
+        // tells the caller to complete the descriptor right away.
+        let mut fence_state = self.fence_state.lock().unwrap();
+        if fence_id > *fence_state.completed_fences.get(&ring).unwrap_or(&0) {
+            fence_state.descs.push(FenceDescriptor {
+                ring,
+                fence_id,
+                desc_index,
+                len,
+            });
+
+            false
+        } else {
+            true
+        }
+    }
+
+    fn resource_create_blob(
+        &mut self,
+        _ctx_id: u32,
+        _resource_id: u32,
+        _resource_create_blob: ResourceCreateBlob,
+        _vecs: Vec<(GuestAddress, usize)>,
+        _mem: &GuestMemoryMmap,
+    ) -> VirtioGpuResult {
+        error!("Not implemented: resource_create_blob");
+        Err(ErrUnspec)
+    }
+
+    fn resource_map_blob(
+        &mut self,
+        _resource_id: u32,
+        _shm_region: &VirtioShmRegion,
+        _offset: u64,
+    ) -> VirtioGpuResult {
+        error!("Not implemented: resource_map_blob");
+        Err(ErrUnspec)
+    }
+
+    fn resource_unmap_blob(
+        &mut self,
+        _resource_id: u32,
+        _shm_region: &VirtioShmRegion,
+    ) -> VirtioGpuResult {
+        error!("Not implemented: resource_unmap_blob");
+        Err(ErrUnspec)
+    }
+
+    fn get_event_poll_fd(&self) -> Option<EventFd> {
+        self.rutabaga.poll_descriptor().map(|fd| {
+            // SAFETY: the fd is valid, because Rutabaga guarantees it.
+            // into_raw_descriptor() returns a RawFd and makes sure SafeDescriptor::drop
+            // doesn't run, so the descriptor is not closed behind our back.
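+            // Ownership of the raw fd therefore passes to the EventFd constructed
+            // below, which closes it exactly once when dropped.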
+            unsafe { EventFd::from_raw_fd(fd.into_raw_descriptor()) }
+        })
+    }
+
+    fn event_poll(&self) {
+        self.rutabaga.event_poll()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::sync::{Arc, Mutex};
+
+    use super::{RutabagaVirtioGpu, VirtioGpu, VirtioGpuResource, VirtioGpuRing, VirtioShmRegion};
+    use rutabaga_gfx::{
+        ResourceCreateBlob, RutabagaBuilder, RutabagaComponentType, RutabagaHandler,
+    };
+    use vm_memory::{GuestAddress, GuestMemoryMmap};
+
+    fn new_2d() -> RutabagaVirtioGpu {
+        let rutabaga = RutabagaBuilder::new(RutabagaComponentType::Rutabaga2D, 0)
+            .build(RutabagaHandler::new(|_| {}), None)
+            .unwrap();
+        RutabagaVirtioGpu {
+            rutabaga,
+            resources: Default::default(),
+            fence_state: Arc::new(Mutex::new(Default::default())),
+            scanouts: Default::default(),
+        }
+    }
+
+    #[test]
+    fn test_gpu_backend_success() {
+        let mut virtio_gpu = new_2d();
+        virtio_gpu.get_capset(0, 0).unwrap();
+        virtio_gpu.process_fence(VirtioGpuRing::Global, 0, 0, 0);
+    }
+
+    #[test]
+    fn test_gpu_backend_failure() {
+        let mut virtio_gpu = new_2d();
+
+        virtio_gpu.get_capset_info(0).unwrap_err();
+        let resource_create_blob = ResourceCreateBlob::default();
+        let vecs = vec![(GuestAddress(0), 10)];
+        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();
+        virtio_gpu
+            .resource_create_blob(1, 1, resource_create_blob, vecs, mem)
+            .unwrap_err();
+
+        let shm_region = VirtioShmRegion::default();
+        let resource = VirtioGpuResource::default();
+        virtio_gpu.resources.insert(1, resource);
+        virtio_gpu.resource_map_blob(1, &shm_region, 0).unwrap_err();
+        virtio_gpu.resource_unmap_blob(1, &shm_region).unwrap_err();
+        let mut cmd_buf = vec![0; 10];
+        let fence_ids: Vec<u64> = Vec::with_capacity(0);
+        virtio_gpu
+            .submit_command(0, &mut cmd_buf[..], &fence_ids)
+            .unwrap_err();
+    }
+}
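+
+// A hypothetical extra test (a sketch, not part of the original patch): it exercises
+// the process_fence bookkeeping implemented above. The setup duplicates
+// `tests::new_2d`, which is private to that module.
+#[cfg(test)]
+mod process_fence_sketch {
+    use std::sync::{Arc, Mutex};
+
+    use super::{RutabagaVirtioGpu, VirtioGpu, VirtioGpuRing};
+    use rutabaga_gfx::{RutabagaBuilder, RutabagaComponentType, RutabagaHandler};
+
+    #[test]
+    fn fence_signals_immediately_or_queues() {
+        let rutabaga = RutabagaBuilder::new(RutabagaComponentType::Rutabaga2D, 0)
+            .build(RutabagaHandler::new(|_| {}), None)
+            .unwrap();
+        let mut virtio_gpu = RutabagaVirtioGpu {
+            rutabaga,
+            resources: Default::default(),
+            fence_state: Arc::new(Mutex::new(Default::default())),
+            scanouts: Default::default(),
+        };
+        // fence_id 0 is not greater than the initial completed counter (0),
+        // so it is reported as already signaled.
+        assert!(virtio_gpu.process_fence(VirtioGpuRing::Global, 0, 0, 0));
+        // fence_id 1 has not completed yet: a FenceDescriptor is queued and the
+        // caller must wait for the fence callback.
+        assert!(!virtio_gpu.process_fence(VirtioGpuRing::Global, 1, 0, 0));
+    }
+}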