Inflight I/O: Implement missing traits
This commit implements the get_inflight_fd and set_inflight_fd members of the
VhostUserSlaveReqHandlerMut trait, which are used to pass inflight
I/O queue tracking regions as memfds.

Fixes #14.

Signed-off-by: Harshavardhan Unnibhavi <[email protected]>
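
For context, the inflight region negotiated through these messages is a shared memfd laid out, per queue, as one queue-region header followed by one descriptor-state entry per ring slot. Below is a minimal sketch of that size calculation, assuming the vhost crate's QueueRegionSplit/DescStateSplit and QueueRegionPacked/DescStatePacked message types; the helper name inflight_region_size is illustrative and not part of this commit.

use std::mem::size_of;
use vhost::vhost_user::message::{
    DescStatePacked, DescStateSplit, QueueRegionPacked, QueueRegionSplit,
};

// Bytes needed to track inflight descriptors for `num_queues` queues of
// `queue_size` entries each.
fn inflight_region_size(num_queues: u16, queue_size: u16, packed: bool) -> usize {
    let per_queue = if packed {
        size_of::<QueueRegionPacked>() + size_of::<DescStatePacked>() * queue_size as usize
    } else {
        size_of::<QueueRegionSplit>() + size_of::<DescStateSplit>() * queue_size as usize
    };
    per_queue * num_queues as usize
}

The frontend requests this region with GET_INFLIGHT_FD when the backend starts and hands the same fd back with SET_INFLIGHT_FD after a reconnect, which is what the two handlers in the diff below implement.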
harshanavkis committed Sep 9, 2021
1 parent 70f668a commit b33b3bd
Showing 1 changed file with 193 additions and 11 deletions.
src/handler.rs
@@ -6,12 +6,13 @@
use std::error;
use std::fs::File;
use std::io;
-use std::os::unix::io::AsRawFd;
+use std::os::unix::io::{AsRawFd, FromRawFd};
use std::sync::Arc;
use std::thread;

use vhost::vhost_user::message::{
-VhostUserConfigFlags, VhostUserMemoryRegion, VhostUserProtocolFeatures,
+DescStatePacked, DescStateSplit, QueueRegionPacked, QueueRegionSplit, VhostUserConfigFlags,
+VhostUserInflight, VhostUserMemoryRegion, VhostUserProtocolFeatures,
VhostUserSingleMemoryRegion, VhostUserVirtioFeatures, VhostUserVringAddrFlags,
VhostUserVringState,
};
@@ -23,9 +24,15 @@ use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use vm_memory::bitmap::Bitmap;
use vm_memory::mmap::NewBitmap;
use vm_memory::{
-FileOffset, GuestAddress, GuestAddressSpace, GuestMemoryMmap, GuestRegionMmap, MmapRegion,
+Address, FileOffset, GuestAddress, GuestAddressSpace, GuestMemoryMmap, GuestRegionMmap,
+MmapRegion,
};

use libc::c_void;
use std::mem;
use std::os::unix::io::RawFd;
use virtio_bindings::bindings::virtio_net::VIRTIO_F_RING_PACKED;

use super::backend::VhostUserBackend;
use super::event_loop::VringEpollHandler;
use super::event_loop::{VringEpollError, VringEpollResult};
@@ -89,6 +96,9 @@ where
atomic_mem: GM<B>,
vrings: Vec<V>,
worker_threads: Vec<thread::JoinHandle<VringEpollResult<()>>>,
inflight_file: Option<File>,
inflight_mapping_addr: Option<GuestAddress>,
inflight_mmap_size: usize,
}

impl<S, V, B> VhostUserHandler<S, V, B>
@@ -146,6 +156,9 @@ where
atomic_mem,
vrings,
worker_threads,
inflight_file: None,
inflight_mapping_addr: None,
inflight_mmap_size: 0,
})
}
}
@@ -535,20 +548,189 @@ where

fn get_inflight_fd(
&mut self,
-_inflight: &vhost::vhost_user::message::VhostUserInflight,
+inflight: &vhost::vhost_user::message::VhostUserInflight,
) -> VhostUserResult<(vhost::vhost_user::message::VhostUserInflight, File)> {
-// Assume the backend hasn't negotiated the inflight feature; it
-// wouldn't be correct for the backend to do so, as we don't (yet)
-// provide a way for it to handle such requests.
-Err(VhostUserError::InvalidOperation)
let ret_val = -1;
// Total size of the inflight queue region
let mut total_mmap_size =
self.get_inflight_queue_size(inflight.queue_size) * inflight.num_queues as usize;

// Create a memfd region to hold the queues for inflight I/O tracking
let mut inflight_fd = -1;
let mmap_ptr = self.memfd_alloc(total_mmap_size, &mut inflight_fd);

self.inflight_file = Some(unsafe { File::from_raw_fd(inflight_fd as i32) });

if mmap_ptr == ret_val as *mut c_void {
total_mmap_size = 0;
self.inflight_mapping_addr = None;
} else {
unsafe { libc::memset(mmap_ptr, 0, total_mmap_size) };
self.inflight_mapping_addr = Some(GuestAddress::new(mmap_ptr as u64));
}
self.inflight_mmap_size = total_mmap_size;

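// Return the region geometry to the frontend along with the memfd backing it;
// mmap_size is 0 if the allocation above failed.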
Ok((
VhostUserInflight {
mmap_size: total_mmap_size as u64,
mmap_offset: 0,
num_queues: inflight.num_queues,
queue_size: inflight.queue_size,
},
unsafe { File::from_raw_fd(inflight_fd as RawFd) },
))
}

fn set_inflight_fd(
&mut self,
-_inflight: &vhost::vhost_user::message::VhostUserInflight,
-_file: File,
+inflight: &vhost::vhost_user::message::VhostUserInflight,
+file: File,
) -> VhostUserResult<()> {
-Err(VhostUserError::InvalidOperation)
let ret_val = -1;

// Need to unmap any previously mmapped regions, as closing the
// associated file doesn't unmap them automatically
if let Some(inflight_addr) = self.inflight_mapping_addr {
unsafe {
libc::munmap(
inflight_addr.raw_value() as *mut c_void,
self.inflight_mmap_size,
)
};
}

let inflight_fd = file.as_raw_fd();

self.inflight_file = Some(file);
let mmap_size = inflight.mmap_size;
let mmap_offset = inflight.mmap_offset;

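// Map the inflight region received from the frontend so the descriptor
// counts can be (re)initialized below.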
let mmap_ptr = unsafe {
libc::mmap(
std::ptr::null_mut::<c_void>(),
mmap_size as usize,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_SHARED,
inflight_fd,
mmap_offset as i64,
)
};

if mmap_ptr == ret_val as *mut c_void {
self.inflight_mapping_addr = None;
self.inflight_mmap_size = 0;
} else {
self.inflight_mapping_addr = Some(GuestAddress::new(mmap_ptr as u64));
self.inflight_mmap_size = mmap_size as usize;
}

self.set_inflight_region_desc_num(inflight.num_queues, inflight.queue_size);

Ok(())
}
}

impl<S, V, B> VhostUserHandler<S, V, B>
where
S: VhostUserBackend<V, B>,
V: VringT<GM<B>>,
B: NewBitmap + Clone,
{
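// Per-queue size of the inflight tracking area: one queue-region header plus
// one descriptor-state entry per ring slot, using the split or packed layout
// depending on the negotiated VIRTIO_F_RING_PACKED feature.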
fn get_inflight_queue_size(&mut self, queue_size: u16) -> usize {
let queue_region_size;
let descr_state_size;
let virtio_features = self.get_features().unwrap();

if virtio_features & (1 << VIRTIO_F_RING_PACKED) == 0 {
// Use descriptor and queue states for split virtqueues
queue_region_size = mem::size_of::<QueueRegionSplit>();
descr_state_size = mem::size_of::<DescStateSplit>();
} else {
// Use descriptor and queue states for packed virtqueues
queue_region_size = mem::size_of::<QueueRegionPacked>();
descr_state_size = mem::size_of::<DescStatePacked>();
}
queue_region_size + descr_state_size * queue_size as usize
}

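// Allocate an anonymous memfd of `mmap_size` bytes, seal it against resizing,
// and map it shared; returns the mapping address (or -1 cast to a pointer on
// failure) and stores the raw fd through `inflight_file`.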
fn memfd_alloc(&self, mmap_size: usize, inflight_file: &mut i64) -> *mut c_void {
let mut ret_val;

ret_val = unsafe {
libc::syscall(
libc::SYS_memfd_create,
&std::ffi::CString::new("inflight-region").unwrap(),
libc::MFD_ALLOW_SEALING,
)
};

if ret_val == -1 {
return ret_val as *mut c_void;
}

*inflight_file = ret_val;

ret_val = unsafe { libc::ftruncate(*inflight_file as RawFd, mmap_size as i64) } as i64;

if ret_val == -1 {
return ret_val as *mut c_void;
}

ret_val = unsafe {
libc::fcntl(
*inflight_file as i32,
libc::F_ADD_SEALS,
libc::F_SEAL_GROW | libc::F_SEAL_SHRINK | libc::F_SEAL_SEAL,
)
} as i64;

if ret_val == -1 {
return ret_val as *mut c_void;
}

unsafe {
libc::mmap(
std::ptr::null_mut(),
mmap_size,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_SHARED,
*inflight_file as i32,
0,
)
}
}

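// Record the ring size in each per-queue region header, packed-ring layout.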
fn set_desc_num_packed(&mut self, inflight_region: u64, num_queues: u16, queue_size: u16) {
let raw_ptr = inflight_region as *mut QueueRegionPacked;

for i in 0..num_queues {
unsafe {
let queue_region = raw_ptr.offset(i as isize).as_mut().unwrap();
queue_region.desc_num = queue_size;
}
}
}

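// Record the ring size in each per-queue region header, split-ring layout.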
fn set_desc_num_split(&mut self, inflight_region: u64, num_queues: u16, queue_size: u16) {
let raw_ptr = inflight_region as *mut QueueRegionSplit;

for i in 0..num_queues {
unsafe {
let queue_region = raw_ptr.offset(i as isize).as_mut().unwrap();
queue_region.desc_num = queue_size;
}
}
}

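// Walk the freshly mapped inflight region and initialize desc_num for every
// queue, picking the layout from the negotiated VIRTIO_F_RING_PACKED bit.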
fn set_inflight_region_desc_num(&mut self, num_queues: u16, queue_size: u16) {
let inflight_region = self.inflight_mapping_addr.unwrap().raw_value();

let virtio_features = self.get_features().unwrap();

match virtio_features & (1 << VIRTIO_F_RING_PACKED) {
0 => self.set_desc_num_split(inflight_region, num_queues, queue_size),
_ => self.set_desc_num_packed(inflight_region, num_queues, queue_size),
};
}
}
