feat: add virtiofs support (#1800)

- Add virtio-fs device driver with PCI transport support
- Implement virtio-fs bridge for FUSE communication
- Extend FUSE connection to support virtio-fs backend
- Add virtiofs mountable filesystem with kernel thread bridge
- Update Makefile with virtiofs build and run targets
- Add virtiofsd startup script and configuration
- Upgrade virtio-drivers dependency to support virtio-fs

Signed-off-by: longjin <longjin@DragonOS.org>
This commit is contained in:
LoGin 2026-03-03 13:47:40 +08:00 committed by GitHub
parent 2f6e86f1a9
commit 653cbcf30b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
13 changed files with 1331 additions and 6 deletions

View File

@ -205,6 +205,16 @@ else
sh -c "cd tools && bash run-qemu.sh --bios=legacy --display=nographic && cd .."
endif
# 启动virtiofsd用于virtiofs功能验证
.PHONY: virtiofsd
virtiofsd:
sh -c "cd tools/virtiofs && bash start_virtiofsd.sh && cd ../.."
# 不编译直接以nographic方式启动QEMU并启用virtiofs设备
.PHONY: qemu-virtiofs-nographic
qemu-virtiofs-nographic: check_arch
sh -c "cd tools && DRAGONOS_VIRTIOFS_ENABLE=1 bash run-qemu.sh --bios=legacy --display=nographic && cd .."
# 不编译直接启动QEMU(UEFI)
qemu-uefi: check_arch
sh -c "cd tools && bash run-qemu.sh --bios=uefi --display=window && cd .."
@ -339,7 +349,9 @@ help:
@echo ""
@echo "运行:"
@echo " make qemu - 不编译,直接从已有的磁盘镜像启动运行"
@echo " make qemu-virtiofs-nographic - 不编译以nographic方式启动并启用virtiofs设备"
@echo " make qemu-uefi - 不编译直接从已有的磁盘镜像以UEFI启动运行"
@echo " make virtiofsd - 启动virtiofsd读取tools/virtiofs/env.sh"
@echo ""
@echo ""
@echo "注: 对于上述的run, run-uefi, qemu, qemu-uefi命令可以在命令后加上-vnc后缀,来通过vnc连接到DragonOS, 默认会在5900端口运行vnc服务器。如make run-vnc "

2
kernel/Cargo.lock generated
View File

@ -1815,7 +1815,7 @@ checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "virtio-drivers"
version = "0.7.2"
source = "git+https://git.mirrors.dragonos.org.cn/DragonOS-Community/virtio-drivers?rev=415ab38ff9#415ab38ff99f3c8e150269c04f65d684ba9d1365"
source = "git+https://git.mirrors.dragonos.org.cn/DragonOS-Community/virtio-drivers?rev=755e305#755e3053cd8856279e44abd0ecca4d6eef3aec93"
dependencies = [
"bitflags 2.9.1",
"log",

View File

@ -81,7 +81,7 @@ smoltcp = { version = "=0.12.0", git = "https://git.mirrors.dragonos.org.cn/Drag
syscall_table_macros = { path = "crates/syscall_table_macros" }
system_error = { path = "crates/system_error" }
unified-init = { path = "crates/unified-init" }
virtio-drivers = { git = "https://git.mirrors.dragonos.org.cn/DragonOS-Community/virtio-drivers", rev = "415ab38ff9" }
virtio-drivers = { git = "https://git.mirrors.dragonos.org.cn/DragonOS-Community/virtio-drivers", rev = "755e305" }
wait_queue_macros = { path = "crates/wait_queue_macros" }
paste = "=1.0.14"
slabmalloc = { path = "crates/rust-slabmalloc" }

View File

@ -13,6 +13,7 @@ pub mod transport_mmio;
pub mod transport_pci;
#[allow(clippy::module_inception)]
pub mod virtio;
pub mod virtio_fs;
pub mod virtio_impl;
/// virtio 设备厂商ID

View File

@ -1,5 +1,6 @@
use super::mmio::virtio_probe_mmio;
use super::transport_pci::PciTransport;
use super::virtio_fs::virtio_fs;
use super::virtio_impl::HalImpl;
use crate::driver::base::device::bus::Bus;
use crate::driver::base::device::{Device, DeviceId};
@ -76,6 +77,7 @@ pub(super) fn virtio_device_init(
warn!("Not support virtio_input device for now");
}
DeviceType::Network => virtio_net(transport, dev_id, dev_parent),
DeviceType::FileSystem => virtio_fs(transport, dev_id, dev_parent),
t => {
warn!("Unrecognized virtio device: {:?}", t);
}
@ -102,7 +104,8 @@ fn virtio_device_search() -> Vec<Arc<PciDeviceStructureGeneralDevice>> {
let standard_device = device.as_standard_device().unwrap();
let header = &standard_device.common_header;
// log::info!("header: {:?}", header);
if header.device_id >= 0x1000 && header.device_id <= 0x103F {
// 支持 transitional(0x1000..=0x103f) 与 modern(0x1040..=0x107f) virtio PCI 设备。
if header.device_id >= 0x1000 && header.device_id <= 0x107F {
virtio_list.push(standard_device);
}
}

View File

@ -0,0 +1,231 @@
use alloc::{
collections::BTreeMap,
string::{String, ToString},
sync::Arc,
};
use core::ptr;
use log::{info, warn};
use system_error::SystemError;
use virtio_drivers::transport::Transport;
use crate::{
driver::base::device::{Device, DeviceId},
libs::{spinlock::SpinLock, wait_queue::WaitQueue},
};
use super::transport::VirtIOTransport;
/// Length of the `tag` field in the virtio-fs device config space
/// (fixed 36-byte, NUL-padded field per the virtio-fs device spec).
const VIRTIO_FS_TAG_LEN: usize = 36;
/// Virtqueue index of the first request queue; queue 0 is the hiprio queue.
const VIRTIO_FS_REQUEST_QUEUE_BASE: u16 = 1;
/// Upper bound on `num_request_queues`: every request queue index
/// (`BASE + slot`) must still fit in a `u16`.
const VIRTIO_FS_MAX_REQUEST_QUEUES: u32 =
    (u16::MAX as u32) - (VIRTIO_FS_REQUEST_QUEUE_BASE as u32) + 1;
/// Device configuration layout of a virtio-fs device.
///
/// `repr(C, packed)` keeps the in-memory layout identical to the device's
/// config space. Fields are never read through references; `read_config`
/// fetches them bytewise with volatile loads.
#[repr(C, packed)]
struct VirtioFsConfig {
    // Mount tag, NUL-padded when shorter than VIRTIO_FS_TAG_LEN.
    tag: [u8; VIRTIO_FS_TAG_LEN],
    // Number of request queues (the hiprio queue is not counted here).
    num_request_queues: u32,
}
/// Newtype wrapper so the transport can be parked inside the
/// `SpinLock`-protected `VirtioFsInstanceState`.
struct VirtioFsTransportHolder(VirtIOTransport);

impl core::fmt::Debug for VirtioFsTransportHolder {
    // Emit a fixed placeholder instead of formatting the inner transport.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.write_str("VirtioFsTransportHolder(..)")
    }
}

// Safety: virtio-fs transport is always moved by ownership under SpinLock
// protection, and never accessed concurrently through shared references.
unsafe impl Send for VirtioFsTransportHolder {}
/// Mutable per-device session state, guarded by `VirtioFsInstance::state`.
#[derive(Debug)]
struct VirtioFsInstanceState {
    // Parked transport; `None` while a bridge session owns it.
    transport: Option<VirtioFsTransportHolder>,
    // True while a mount session holds the transport.
    session_active: bool,
    // Id of the currently running session (0 when none).
    active_session_id: u64,
    // Id of the most recently finished session; umount waits on this.
    released_session_id: u64,
    // Next id to hand out; starts at 1 so 0 can mean "no session".
    next_session_id: u64,
}
/// One probed virtio-fs device, registered globally under its mount tag.
///
/// At most one mount session may own the device's transport at a time; the
/// session protocol is implemented by `take_transport_for_session` /
/// `put_transport_after_session` / `wait_session_released`.
#[derive(Debug)]
pub struct VirtioFsInstance {
    // Mount tag read from config space; key into VIRTIO_FS_INSTANCES.
    tag: String,
    // Request-queue count reported by the device (hiprio excluded).
    num_request_queues: u32,
    dev_id: Arc<DeviceId>,
    state: SpinLock<VirtioFsInstanceState>,
    // Woken when a session returns the transport.
    session_wait: WaitQueue,
}
impl VirtioFsInstance {
    /// Build an instance for a freshly probed device; the transport is parked
    /// in the state until a mount session claims it.
    fn new(
        tag: String,
        num_request_queues: u32,
        dev_id: Arc<DeviceId>,
        transport: VirtIOTransport,
    ) -> Self {
        Self {
            tag,
            num_request_queues,
            dev_id,
            state: SpinLock::new(VirtioFsInstanceState {
                transport: Some(VirtioFsTransportHolder(transport)),
                session_active: false,
                active_session_id: 0,
                released_session_id: 0,
                // 0 is reserved for "no session", so ids start at 1.
                next_session_id: 1,
            }),
            session_wait: WaitQueue::default(),
        }
    }

    /// Mount tag advertised by the device.
    pub fn tag(&self) -> &str {
        &self.tag
    }

    pub fn dev_id(&self) -> &Arc<DeviceId> {
        &self.dev_id
    }

    /// Request-queue count from config space (hiprio queue excluded).
    pub fn num_request_queues(&self) -> u32 {
        self.num_request_queues
    }

    /// The hiprio queue is always virtqueue 0.
    pub fn hiprio_queue_index(&self) -> u16 {
        0
    }

    pub fn request_queue_count(&self) -> usize {
        self.num_request_queues as usize
    }

    /// Map a zero-based request slot to its virtqueue index
    /// (`VIRTIO_FS_REQUEST_QUEUE_BASE + slot`); `None` when out of range.
    pub fn request_queue_index_by_slot(&self, slot: usize) -> Option<u16> {
        if slot >= self.request_queue_count() {
            return None;
        }
        let slot = u16::try_from(slot).ok()?;
        // checked_add guards u16 overflow; the filter re-validates the range
        // (defensive — slot < request_queue_count already implies it).
        VIRTIO_FS_REQUEST_QUEUE_BASE
            .checked_add(slot)
            .filter(|idx| {
                (*idx as usize)
                    < (VIRTIO_FS_REQUEST_QUEUE_BASE as usize + self.request_queue_count())
            })
    }

    /// Claim the transport for an exclusive mount session.
    ///
    /// Returns EBUSY if a session is already active (or the transport is
    /// otherwise absent); on success yields the transport plus a fresh
    /// session id to later pass to `wait_session_released`.
    pub fn take_transport_for_session(&self) -> Result<(VirtIOTransport, u64), SystemError> {
        let mut state = self.state.lock_irqsave();
        if state.session_active {
            return Err(SystemError::EBUSY);
        }
        let transport = state.transport.take().ok_or(SystemError::EBUSY)?.0;
        let session_id = state.next_session_id;
        state.next_session_id = state.next_session_id.wrapping_add(1);
        state.active_session_id = session_id;
        state.session_active = true;
        Ok((transport, session_id))
    }

    /// Return the transport after a session ends and wake umount waiters.
    pub fn put_transport_after_session(&self, transport: VirtIOTransport) {
        let mut state = self.state.lock_irqsave();
        state.transport = Some(VirtioFsTransportHolder(transport));
        state.released_session_id = state.active_session_id;
        state.active_session_id = 0;
        state.session_active = false;
        // Drop the lock before waking so waiters can re-check immediately.
        drop(state);
        self.session_wait.wakeup(None);
    }

    /// Block until the given session has returned its transport.
    pub fn wait_session_released(&self, session_id: u64) {
        self.session_wait.wait_until(|| {
            let state = self.state.lock_irqsave();
            if state.released_session_id == session_id && state.transport.is_some() {
                Some(())
            } else {
                None
            }
        });
    }
}
lazy_static! {
    /// Global registry of probed virtio-fs devices, keyed by mount tag.
    static ref VIRTIO_FS_INSTANCES: SpinLock<BTreeMap<String, Arc<VirtioFsInstance>>> =
        SpinLock::new(BTreeMap::new());
}
/// Read `(tag, num_request_queues)` from the device's config space.
///
/// The config struct is packed and lives in device memory, so every byte is
/// fetched with a volatile read instead of forming references into it.
/// Returns EINVAL for an unreadable config, an empty or non-UTF-8 tag, or a
/// queue count of 0 / beyond `VIRTIO_FS_MAX_REQUEST_QUEUES`.
fn read_config(transport: &VirtIOTransport) -> Result<(String, u32), SystemError> {
    let cfg = transport
        .config_space::<VirtioFsConfig>()
        .map_err(|_| SystemError::EINVAL)?;
    let base = cfg.as_ptr() as *const u8;
    // Copy the NUL-padded tag bytewise.
    let mut tag_raw = [0u8; VIRTIO_FS_TAG_LEN];
    for (i, b) in tag_raw.iter_mut().enumerate() {
        *b = unsafe { ptr::read_volatile(base.add(i)) };
    }
    // The tag may occupy the whole field; the first NUL (if any) ends it.
    let tag_len = tag_raw
        .iter()
        .position(|x| *x == 0)
        .unwrap_or(VIRTIO_FS_TAG_LEN);
    if tag_len == 0 {
        return Err(SystemError::EINVAL);
    }
    let tag = core::str::from_utf8(&tag_raw[..tag_len])
        .map_err(|_| SystemError::EINVAL)?
        .to_string();
    // num_request_queues immediately follows the tag; virtio config space
    // fields are little-endian.
    let mut nrqs_raw = [0u8; core::mem::size_of::<u32>()];
    for (i, b) in nrqs_raw.iter_mut().enumerate() {
        *b = unsafe { ptr::read_volatile(base.add(VIRTIO_FS_TAG_LEN + i)) };
    }
    let nrqs = u32::from_le_bytes(nrqs_raw);
    if nrqs == 0 || nrqs > VIRTIO_FS_MAX_REQUEST_QUEUES {
        return Err(SystemError::EINVAL);
    }
    Ok((tag, nrqs))
}
pub fn virtio_fs_find_instance(tag: &str) -> Option<Arc<VirtioFsInstance>> {
VIRTIO_FS_INSTANCES.lock_irqsave().get(tag).cloned()
}
pub fn virtio_fs(
transport: VirtIOTransport,
dev_id: Arc<DeviceId>,
_dev_parent: Option<Arc<dyn Device>>,
) {
let (tag, nrqs) = match read_config(&transport) {
Ok(v) => v,
Err(e) => {
warn!(
"virtio-fs: failed to read config for device {:?}: {:?}",
dev_id, e
);
return;
}
};
let instance = Arc::new(VirtioFsInstance::new(
tag.clone(),
nrqs,
dev_id.clone(),
transport,
));
let mut map = VIRTIO_FS_INSTANCES.lock_irqsave();
if map.contains_key(&tag) {
warn!(
"virtio-fs: duplicated tag '{}' for device {:?}, ignore new device",
tag, dev_id
);
return;
}
map.insert(tag.clone(), instance);
info!(
"virtio-fs: registered instance tag='{}' dev={:?} request_queues={}",
tag, dev_id, nrqs
);
}

View File

@ -147,6 +147,7 @@ struct FuseConnInner {
no_open: bool,
no_opendir: bool,
no_readdirplus: bool,
max_write_cap: usize,
pending: VecDeque<Arc<FuseRequest>>,
processing: BTreeMap<u64, Arc<FusePendingState>>,
}
@ -165,8 +166,23 @@ pub struct FuseConn {
impl FuseConn {
// Keep this in sync with `sys_read.rs` userspace chunking size.
const USER_READ_CHUNK: usize = 64 * 1024;
const MIN_MAX_WRITE: usize = 4096;
pub fn new() -> Arc<Self> {
Self::new_with_max_write_cap(Self::max_write_cap_for_user_read_chunk())
}
pub fn new_for_virtiofs(max_message_size: usize) -> Arc<Self> {
let overhead = core::mem::size_of::<FuseInHeader>() + core::mem::size_of::<FuseWriteIn>();
let cap = if max_message_size > overhead {
core::cmp::max(Self::MIN_MAX_WRITE, max_message_size - overhead)
} else {
Self::MIN_MAX_WRITE
};
Self::new_with_max_write_cap(cap)
}
fn new_with_max_write_cap(max_write_cap: usize) -> Arc<Self> {
Arc::new(Self {
inner: Mutex::new(FuseConnInner {
connected: true,
@ -179,6 +195,7 @@ impl FuseConn {
no_open: false,
no_opendir: false,
no_readdirplus: false,
max_write_cap,
pending: VecDeque::new(),
processing: BTreeMap::new(),
}),
@ -196,6 +213,14 @@ impl FuseConn {
self.inner.lock().mounted
}
pub fn is_connected(&self) -> bool {
self.inner.lock().connected
}
pub fn has_pending_requests(&self) -> bool {
!self.inner.lock().pending.is_empty()
}
pub fn mark_mounted(&self) -> Result<(), SystemError> {
let mut g = self.inner.lock();
if !g.connected {
@ -461,12 +486,17 @@ impl FuseConn {
fn max_write_cap_for_user_read_chunk() -> usize {
let overhead = core::mem::size_of::<FuseInHeader>() + core::mem::size_of::<FuseWriteIn>();
if Self::USER_READ_CHUNK <= overhead {
4096
Self::MIN_MAX_WRITE
} else {
Self::USER_READ_CHUNK - overhead
}
}
fn max_write_cap(&self) -> usize {
let g = self.inner.lock();
core::cmp::max(Self::MIN_MAX_WRITE, g.max_write_cap)
}
fn min_read_buffer(&self) -> usize {
let g = self.inner.lock();
Self::calc_min_read_buffer(g.init.max_write as usize)
@ -777,11 +807,11 @@ impl FuseConn {
1
};
let negotiated_max_write = core::cmp::max(4096usize, init_out.max_write as usize);
let max_write_cap = Self::max_write_cap_for_user_read_chunk();
let max_write_cap = self.max_write_cap();
let capped_max_write = core::cmp::min(negotiated_max_write, max_write_cap);
if capped_max_write < negotiated_max_write {
log::trace!(
"fuse: cap negotiated max_write from {} to {} due user read chunk limit",
"fuse: cap negotiated max_write from {} to {} due backend read buffer limit",
negotiated_max_write,
capped_max_write
);

View File

@ -4,3 +4,4 @@ pub mod fs;
pub mod inode;
pub mod private_data;
pub mod protocol;
pub mod virtiofs;

View File

@ -0,0 +1,873 @@
use alloc::{
boxed::Box,
collections::{BTreeMap, VecDeque},
string::String,
sync::Arc,
vec,
vec::Vec,
};
use linkme::distributed_slice;
use log::{debug, warn};
use system_error::SystemError;
use virtio_drivers::{
queue::VirtQueue,
transport::{DeviceStatus, Transport},
Error as VirtioError, PAGE_SIZE,
};
use crate::{
driver::virtio::{
transport::VirtIOTransport,
virtio_drivers_error_to_system_error,
virtio_fs::{virtio_fs_find_instance, VirtioFsInstance},
virtio_impl::HalImpl,
},
filesystem::vfs::{
FileSystem, FileSystemMakerData, FsInfo, IndexNode, MountableFileSystem, SuperBlock,
FSMAKER,
},
process::{kthread::KernelThreadClosure, kthread::KernelThreadMechanism, ProcessManager},
register_mountable_fs,
time::{sleep::nanosleep, PosixTimeSpec},
};
use super::{
conn::FuseConn,
fs::{FuseFS, FuseMountData},
protocol::{
fuse_pack_struct, fuse_read_struct, FuseInHeader, FuseOutHeader, FUSE_DESTROY, FUSE_FORGET,
FUSE_INTERRUPT,
},
};
/// Descriptor-ring size used for the hiprio and every request virtqueue.
const VIRTIOFS_REQ_QUEUE_SIZE: usize = 8;
/// Maximum FUSE request size pulled from the connection per message.
const VIRTIOFS_REQ_BUF_SIZE: usize = 256 * 1024;
/// Size of the device-writable response buffer attached to each request.
const VIRTIOFS_RSP_BUF_SIZE: usize = 256 * 1024;
/// Poll-loop sleep when no progress was made (1 ms, in nanoseconds).
const VIRTIOFS_POLL_NS: i64 = 1_000_000;
/// Max FUSE requests pumped from the connection per loop iteration.
const VIRTIOFS_PUMP_BUDGET: usize = 64;
/// Which virtqueue a request is routed to: the single hiprio queue
/// (FORGET/INTERRUPT) or one of the request queues, by zero-based slot.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum QueueKind {
    Hiprio,
    Request(usize),
}
/// A FUSE request pulled from the connection but not yet on a virtqueue.
#[derive(Debug)]
struct PendingReq {
    // Raw serialized FUSE request bytes (header + payload).
    req: Vec<u8>,
    // FUSE `unique` id; 0 means no reply is expected.
    unique: u64,
    opcode: u32,
    // True for opcodes that never get a reply (FORGET, DESTROY).
    noreply: bool,
    // Target virtqueue chosen at pump time.
    queue: QueueKind,
}
/// A request whose descriptor chain is currently owned by the device.
#[derive(Debug)]
struct InflightReq {
    pending: PendingReq,
    // Device-writable response buffer; `None` for noreply requests.
    rsp: Option<Vec<u8>>,
}
/// State of one bridge kernel thread: shuttles FUSE requests between a
/// `FuseConn` and the device's virtqueues for the lifetime of a mount.
struct VirtioFsBridgeContext {
    instance: Arc<VirtioFsInstance>,
    conn: Arc<FuseConn>,
    // Owned for the session; handed back to `instance` in `finish()`.
    transport: Option<VirtIOTransport>,
    hiprio_vq: Option<VirtQueue<HalImpl, { VIRTIOFS_REQ_QUEUE_SIZE }>>,
    request_vqs: Vec<VirtQueue<HalImpl, { VIRTIOFS_REQ_QUEUE_SIZE }>>,
    // Per-queue FIFOs of requests waiting for a free descriptor chain.
    hiprio_pending: VecDeque<PendingReq>,
    request_pending: Vec<VecDeque<PendingReq>>,
    // token -> request maps for chains currently owned by the device.
    hiprio_inflight: BTreeMap<u16, InflightReq>,
    request_inflight: Vec<BTreeMap<u16, InflightReq>>,
    // Round-robin cursor for spreading requests across request queues.
    next_request_slot: usize,
    // Reusable scratch buffer for reading requests off the connection.
    req_buf: Vec<u8>,
}
impl VirtioFsBridgeContext {
    /// Sleep briefly between polling rounds when no progress was made.
    fn poll_pause() {
        let _ = nanosleep(PosixTimeSpec::new(0, VIRTIOFS_POLL_NS));
    }

    /// Any request queued internally but not yet submitted to the device?
    fn has_internal_pending(&self) -> bool {
        !self.hiprio_pending.is_empty() || self.request_pending.iter().any(|q| !q.is_empty())
    }

    /// Any descriptor chain still owned by the device?
    fn has_inflight(&self) -> bool {
        !self.hiprio_inflight.is_empty() || self.request_inflight.iter().any(|m| !m.is_empty())
    }

    /// Append a request to the tail of its target queue's pending FIFO.
    fn push_pending_back(&mut self, pending: PendingReq) -> Result<(), SystemError> {
        match pending.queue {
            QueueKind::Hiprio => self.hiprio_pending.push_back(pending),
            QueueKind::Request(slot) => self
                .request_pending
                .get_mut(slot)
                .ok_or(SystemError::EINVAL)?
                .push_back(pending),
        }
        Ok(())
    }

    /// Re-queue a request at the head (used when the virtqueue is full so
    /// submission order is preserved).
    fn push_pending_front(&mut self, pending: PendingReq) -> Result<(), SystemError> {
        match pending.queue {
            QueueKind::Hiprio => self.hiprio_pending.push_front(pending),
            QueueKind::Request(slot) => self
                .request_pending
                .get_mut(slot)
                .ok_or(SystemError::EINVAL)?
                .push_front(pending),
        }
        Ok(())
    }

    /// Take the next pending request for the given queue, if any.
    fn pop_pending_front(&mut self, kind: QueueKind) -> Result<Option<PendingReq>, SystemError> {
        Ok(match kind {
            QueueKind::Hiprio => self.hiprio_pending.pop_front(),
            QueueKind::Request(slot) => self
                .request_pending
                .get_mut(slot)
                .ok_or(SystemError::EINVAL)?
                .pop_front(),
        })
    }

    /// Resolve a `QueueKind` to the device's virtqueue index.
    fn queue_index(&self, kind: QueueKind) -> Result<u16, SystemError> {
        match kind {
            QueueKind::Hiprio => Ok(self.instance.hiprio_queue_index()),
            QueueKind::Request(slot) => self
                .instance
                .request_queue_index_by_slot(slot)
                .ok_or(SystemError::EINVAL),
        }
    }

    /// Remove and return the inflight entry for a completed token.
    fn take_inflight(&mut self, kind: QueueKind, token: u16) -> Result<InflightReq, SystemError> {
        match kind {
            QueueKind::Hiprio => self.hiprio_inflight.remove(&token).ok_or(SystemError::EIO),
            QueueKind::Request(slot) => self
                .request_inflight
                .get_mut(slot)
                .ok_or(SystemError::EINVAL)?
                .remove(&token)
                .ok_or(SystemError::EIO),
        }
    }

    /// Re-insert an inflight entry after a failed `pop_used` so the chain is
    /// not lost; the token must not already be present.
    fn put_back_inflight(
        &mut self,
        kind: QueueKind,
        token: u16,
        inflight: InflightReq,
    ) -> Result<(), SystemError> {
        let replaced = match kind {
            QueueKind::Hiprio => self.hiprio_inflight.insert(token, inflight),
            QueueKind::Request(slot) => self
                .request_inflight
                .get_mut(slot)
                .ok_or(SystemError::EINVAL)?
                .insert(token, inflight),
        };
        debug_assert!(replaced.is_none());
        Ok(())
    }

    /// Complete a FUSE request toward the filesystem with a bare error reply
    /// (header only). `unique == 0` marks a noreply request and is skipped.
    fn complete_request_with_errno(conn: &Arc<FuseConn>, unique: u64, errno: i32) {
        if unique == 0 {
            return;
        }
        let out_hdr = FuseOutHeader {
            len: core::mem::size_of::<FuseOutHeader>() as u32,
            error: errno,
            unique,
        };
        let payload = fuse_pack_struct(&out_hdr);
        let _ = conn.write_reply(payload);
    }

    /// Convenience wrapper mapping a `SystemError` to its POSIX errno.
    fn complete_request_with_error(&self, unique: u64, err: SystemError) {
        Self::complete_request_with_errno(&self.conn, unique, err.to_posix_errno());
    }

    /// Round-robin pick of the next request-queue slot.
    fn choose_request_slot(&mut self) -> Result<usize, SystemError> {
        if self.request_vqs.is_empty() {
            return Err(SystemError::ENODEV);
        }
        let slot = self.next_request_slot % self.request_vqs.len();
        self.next_request_slot = (self.next_request_slot + 1) % self.request_vqs.len();
        Ok(slot)
    }

    /// Pull up to `VIRTIOFS_PUMP_BUDGET` FUSE requests from the connection
    /// (non-blocking) and stage them on the appropriate pending queue.
    /// Returns whether anything was pumped.
    fn pump_new_requests(&mut self) -> Result<bool, SystemError> {
        let mut progressed = false;
        for _ in 0..VIRTIOFS_PUMP_BUDGET {
            let len = match self.conn.read_request(true, &mut self.req_buf) {
                Ok(len) => len,
                // Nothing more to read right now.
                Err(SystemError::EAGAIN_OR_EWOULDBLOCK) => break,
                Err(e) => return Err(e),
            };
            let req = self.req_buf[..len].to_vec();
            let in_hdr: FuseInHeader = fuse_read_struct(&req)?;
            // FORGET and DESTROY never get a reply; FORGET and INTERRUPT go
            // to the hiprio queue, everything else round-robins the request
            // queues.
            let noreply = matches!(in_hdr.opcode, FUSE_FORGET | FUSE_DESTROY);
            let queue = if matches!(in_hdr.opcode, FUSE_FORGET | FUSE_INTERRUPT) {
                QueueKind::Hiprio
            } else {
                QueueKind::Request(self.choose_request_slot()?)
            };
            self.push_pending_back(PendingReq {
                req,
                unique: in_hdr.unique,
                opcode: in_hdr.opcode,
                noreply,
                queue,
            })?;
            progressed = true;
        }
        Ok(progressed)
    }

    /// Submit one pending request from the given queue onto its virtqueue.
    ///
    /// Returns Ok(true) when a request was consumed (submitted or failed
    /// terminally), Ok(false) when the queue was empty or the ring is full
    /// (the request is pushed back to the front in that case).
    fn submit_one_pending(&mut self, kind: QueueKind) -> Result<bool, SystemError> {
        let pending = match self.pop_pending_front(kind)? {
            Some(p) => p,
            None => return Ok(false),
        };
        let queue_idx = self.queue_index(kind)?;
        // Noreply requests carry no device-writable buffer.
        let mut rsp = if pending.noreply {
            None
        } else {
            Some(vec![0u8; VIRTIOFS_RSP_BUF_SIZE])
        };
        let (token, should_notify) = match kind {
            QueueKind::Hiprio => {
                let queue = self.hiprio_vq.as_mut().ok_or(SystemError::EIO)?;
                // SAFETY (both arms): the request and response buffers are
                // kept alive in `InflightReq` until pop_used returns them.
                let token = if let Some(rsp_buf) = rsp.as_mut() {
                    let inputs = [pending.req.as_slice()];
                    let mut outputs = [rsp_buf.as_mut_slice()];
                    unsafe { queue.add(&inputs, &mut outputs) }
                } else {
                    let inputs = [pending.req.as_slice()];
                    let mut outputs: [&mut [u8]; 0] = [];
                    unsafe { queue.add(&inputs, &mut outputs) }
                };
                (token, queue.should_notify())
            }
            QueueKind::Request(slot) => {
                let queue = self.request_vqs.get_mut(slot).ok_or(SystemError::EINVAL)?;
                let token = if let Some(rsp_buf) = rsp.as_mut() {
                    let inputs = [pending.req.as_slice()];
                    let mut outputs = [rsp_buf.as_mut_slice()];
                    unsafe { queue.add(&inputs, &mut outputs) }
                } else {
                    let inputs = [pending.req.as_slice()];
                    let mut outputs: [&mut [u8]; 0] = [];
                    unsafe { queue.add(&inputs, &mut outputs) }
                };
                (token, queue.should_notify())
            }
        };
        let token = match token {
            Ok(token) => token,
            // Ring full / not ready: retry later, preserving order.
            Err(VirtioError::QueueFull) | Err(VirtioError::NotReady) => {
                self.push_pending_front(pending)?;
                return Ok(false);
            }
            Err(e) => {
                let se = virtio_drivers_error_to_system_error(e);
                warn!(
                    "virtiofs bridge: submit failed opcode={} unique={} queue={:?} err={:?}",
                    pending.opcode, pending.unique, kind, se
                );
                // Terminal submit failure: fail this request toward FUSE.
                if !pending.noreply {
                    self.complete_request_with_error(pending.unique, se);
                }
                return Ok(true);
            }
        };
        if should_notify {
            self.transport
                .as_mut()
                .ok_or(SystemError::EIO)?
                .notify(queue_idx);
        }
        // Track the chain until the device returns it via the used ring.
        let inflight = InflightReq { pending, rsp };
        match kind {
            QueueKind::Hiprio => {
                self.hiprio_inflight.insert(token, inflight);
            }
            QueueKind::Request(slot) => {
                self.request_inflight
                    .get_mut(slot)
                    .ok_or(SystemError::EINVAL)?
                    .insert(token, inflight);
            }
        }
        Ok(true)
    }

    /// Drain every pending FIFO into its virtqueue as far as ring space
    /// allows. Returns whether anything was submitted.
    fn submit_pending(&mut self) -> Result<bool, SystemError> {
        let mut progressed = false;
        while self.submit_one_pending(QueueKind::Hiprio)? {
            progressed = true;
        }
        for slot in 0..self.request_vqs.len() {
            while self.submit_one_pending(QueueKind::Request(slot))? {
                progressed = true;
            }
        }
        Ok(progressed)
    }

    /// Reclaim one used descriptor chain from the given queue and forward
    /// the device's response to the FUSE connection.
    ///
    /// Returns Ok(false) when the used ring is empty.
    fn pop_one_used(&mut self, kind: QueueKind) -> Result<bool, SystemError> {
        let token = match kind {
            QueueKind::Hiprio => {
                let queue = self.hiprio_vq.as_mut().ok_or(SystemError::EIO)?;
                if !queue.can_pop() {
                    return Ok(false);
                }
                queue.peek_used().ok_or(SystemError::EIO)?
            }
            QueueKind::Request(slot) => {
                let queue = self.request_vqs.get_mut(slot).ok_or(SystemError::EINVAL)?;
                if !queue.can_pop() {
                    return Ok(false);
                }
                queue.peek_used().ok_or(SystemError::EIO)?
            }
        };
        let mut inflight = self.take_inflight(kind, token)?;
        let used_len_res = match kind {
            QueueKind::Hiprio => {
                let queue = self.hiprio_vq.as_mut().ok_or(SystemError::EIO)?;
                // SAFETY (both arms): the same buffers passed to `add` for
                // this token are handed back to `pop_used`.
                let inputs = [inflight.pending.req.as_slice()];
                if let Some(rsp_buf) = inflight.rsp.as_mut() {
                    let mut outputs = [rsp_buf.as_mut_slice()];
                    unsafe { queue.pop_used(token, &inputs, &mut outputs) }
                        .map_err(virtio_drivers_error_to_system_error)
                } else {
                    let mut outputs: [&mut [u8]; 0] = [];
                    unsafe { queue.pop_used(token, &inputs, &mut outputs) }
                        .map_err(virtio_drivers_error_to_system_error)
                }
            }
            QueueKind::Request(slot) => {
                let queue = self.request_vqs.get_mut(slot).ok_or(SystemError::EINVAL)?;
                let inputs = [inflight.pending.req.as_slice()];
                if let Some(rsp_buf) = inflight.rsp.as_mut() {
                    let mut outputs = [rsp_buf.as_mut_slice()];
                    unsafe { queue.pop_used(token, &inputs, &mut outputs) }
                        .map_err(virtio_drivers_error_to_system_error)
                } else {
                    let mut outputs: [&mut [u8]; 0] = [];
                    unsafe { queue.pop_used(token, &inputs, &mut outputs) }
                        .map_err(virtio_drivers_error_to_system_error)
                }
            }
        };
        let used_len = match used_len_res {
            Ok(v) => v as usize,
            Err(e) => {
                // Keep the chain tracked so it can be retried / failed later.
                let unique = inflight.pending.unique;
                self.put_back_inflight(kind, token, inflight)?;
                warn!(
                    "virtiofs bridge: pop_used failed unique={} token={} queue={:?} err={:?}",
                    unique, token, kind, e
                );
                return Err(e);
            }
        };
        if inflight.pending.noreply {
            return Ok(true);
        }
        let rsp_buf = inflight.rsp.as_ref().ok_or(SystemError::EIO)?;
        if used_len > rsp_buf.len() {
            // Device claimed to write more than we gave it: fail the request.
            self.complete_request_with_error(inflight.pending.unique, SystemError::EIO);
            return Ok(true);
        }
        match self.conn.write_reply(&rsp_buf[..used_len]) {
            // ENOENT: request already gone (e.g. interrupted) — not an error.
            Ok(_) | Err(SystemError::ENOENT) => {}
            Err(e) => {
                // Linux virtio-fs always ends a completed request from the used ring.
                // Keep that behavior here: fail this unique instead of exiting bridge loop.
                let unique = inflight.pending.unique;
                let completion_err = if e == SystemError::EINVAL {
                    SystemError::EIO
                } else {
                    e.clone()
                };
                warn!(
                    "virtiofs bridge: write_reply failed unique={} opcode={} err={:?}, complete with {:?}",
                    unique, inflight.pending.opcode, e, completion_err
                );
                self.complete_request_with_error(unique, completion_err);
            }
        }
        Ok(true)
    }

    /// Reap every available completion from every queue.
    fn drain_completions(&mut self) -> Result<bool, SystemError> {
        let mut progressed = false;
        while self.pop_one_used(QueueKind::Hiprio)? {
            progressed = true;
        }
        for slot in 0..self.request_vqs.len() {
            while self.pop_one_used(QueueKind::Request(slot))? {
                progressed = true;
            }
        }
        Ok(progressed)
    }

    /// Fail every pending and inflight request with `err` (used on bridge
    /// teardown so no FUSE request hangs forever).
    fn fail_all_unfinished(&mut self, err: SystemError) {
        let conn = self.conn.clone();
        let errno = err.to_posix_errno();
        // Collect uniques first, then reply outside the container walks.
        let mut need_reply = Vec::new();
        while let Some(req) = self.hiprio_pending.pop_front() {
            if !req.noreply {
                need_reply.push(req.unique);
            }
        }
        for pending_q in &mut self.request_pending {
            while let Some(req) = pending_q.pop_front() {
                if !req.noreply {
                    need_reply.push(req.unique);
                }
            }
        }
        for (_, req) in self.hiprio_inflight.iter() {
            if !req.pending.noreply {
                need_reply.push(req.pending.unique);
            }
        }
        self.hiprio_inflight.clear();
        for inflight_map in &mut self.request_inflight {
            for (_, req) in inflight_map.iter() {
                if !req.pending.noreply {
                    need_reply.push(req.pending.unique);
                }
            }
            inflight_map.clear();
        }
        for unique in need_reply {
            Self::complete_request_with_errno(&conn, unique, errno);
        }
    }

    /// Main bridge loop: pump requests, submit, ack interrupts, reap
    /// completions; sleep briefly when idle. Exits when the connection is
    /// gone (and no chains are inflight) or the mount is fully drained.
    fn run_loop(&mut self) -> Result<(), SystemError> {
        loop {
            let mut progressed = false;
            match self.pump_new_requests() {
                Ok(v) => progressed |= v,
                // Connection not ready yet — keep polling.
                Err(SystemError::ENOTCONN) => {}
                Err(e) => {
                    warn!("virtiofs bridge: read_request failed: {:?}", e);
                    if !self.conn.is_connected() {
                        break;
                    }
                }
            }
            progressed |= self.submit_pending()?;
            if let Some(transport) = self.transport.as_mut() {
                let _ = transport.ack_interrupt();
            }
            progressed |= self.drain_completions()?;
            // Disconnected and nothing outstanding on the device: done.
            if !self.conn.is_connected() && !self.has_inflight() {
                break;
            }
            // Unmounted and fully drained: done.
            if !self.conn.is_mounted()
                && !self.conn.has_pending_requests()
                && !self.has_internal_pending()
                && !self.has_inflight()
            {
                break;
            }
            if !progressed {
                Self::poll_pause();
            }
        }
        Ok(())
    }

    /// Tear down: fail outstanding requests, unset the virtqueues, reset the
    /// device, and hand the transport back to the instance (waking umount).
    fn finish(&mut self) {
        self.fail_all_unfinished(SystemError::ENOTCONN);
        if let Some(transport) = self.transport.as_mut() {
            if self.hiprio_vq.take().is_some() {
                transport.queue_unset(self.instance.hiprio_queue_index());
            }
            for slot in 0..self.request_vqs.len() {
                if let Some(idx) = self.instance.request_queue_index_by_slot(slot) {
                    transport.queue_unset(idx);
                }
            }
            self.request_vqs.clear();
            transport.set_status(DeviceStatus::empty());
        }
        if let Some(transport) = self.transport.take() {
            self.instance.put_transport_after_session(transport);
        }
    }
}
/// Kernel-thread entry point for the virtio-fs bridge.
///
/// `arg` is a raw pointer produced by `Box::into_raw` in `start_bridge`;
/// ownership of the context transfers to this thread.
fn virtiofs_bridge_thread_entry(arg: usize) -> i32 {
    // SAFETY: `arg` came from Box::into_raw in `start_bridge` and is handed
    // to exactly one thread, so reclaiming the Box here is sound.
    let mut ctx = unsafe { Box::from_raw(arg as *mut VirtioFsBridgeContext) };
    let result = ctx.run_loop();
    if let Err(e) = &result {
        warn!("virtiofs bridge thread exit with error: {:?}", e);
    }
    // Always run teardown so the transport is returned even on error.
    ctx.finish();
    result.map(|_| 0).unwrap_or_else(|e| e.to_posix_errno())
}
fn start_bridge(instance: Arc<VirtioFsInstance>, conn: Arc<FuseConn>) -> Result<u64, SystemError> {
let (mut transport, session_id) = instance.take_transport_for_session()?;
if instance.request_queue_count() == 0 {
warn!(
"virtiofs bridge: no request queues: tag='{}' dev={:?}",
instance.tag(),
instance.dev_id(),
);
instance.put_transport_after_session(transport);
return Err(SystemError::EINVAL);
}
debug!(
"virtiofs bridge: start tag='{}' dev={:?} request_queues={}",
instance.tag(),
instance.dev_id(),
instance.num_request_queues()
);
transport.set_status(DeviceStatus::empty());
transport.set_status(DeviceStatus::ACKNOWLEDGE | DeviceStatus::DRIVER);
let _device_features = transport.read_device_features();
transport.write_driver_features(0);
transport
.set_status(DeviceStatus::ACKNOWLEDGE | DeviceStatus::DRIVER | DeviceStatus::FEATURES_OK);
let status = transport.get_status();
if !status.contains(DeviceStatus::FEATURES_OK) {
warn!(
"virtiofs bridge: device rejected features tag='{}' dev={:?} status={:?}",
instance.tag(),
instance.dev_id(),
status
);
transport.set_status(DeviceStatus::FAILED);
instance.put_transport_after_session(transport);
return Err(SystemError::ENODEV);
}
transport.set_guest_page_size(PAGE_SIZE as u32);
let hiprio_vq = match VirtQueue::<HalImpl, { VIRTIOFS_REQ_QUEUE_SIZE }>::new(
&mut transport,
instance.hiprio_queue_index(),
false,
false,
) {
Ok(vq) => vq,
Err(e) => {
let se = virtio_drivers_error_to_system_error(e);
transport.set_status(DeviceStatus::FAILED);
instance.put_transport_after_session(transport);
return Err(se);
}
};
let mut request_vqs = Vec::with_capacity(instance.request_queue_count());
for slot in 0..instance.request_queue_count() {
let idx = instance
.request_queue_index_by_slot(slot)
.ok_or(SystemError::EINVAL)?;
let vq = match VirtQueue::<HalImpl, { VIRTIOFS_REQ_QUEUE_SIZE }>::new(
&mut transport,
idx,
false,
false,
) {
Ok(vq) => vq,
Err(e) => {
let se = virtio_drivers_error_to_system_error(e);
transport.set_status(DeviceStatus::FAILED);
instance.put_transport_after_session(transport);
return Err(se);
}
};
request_vqs.push(vq);
}
transport.finish_init();
let ctx = Box::new(VirtioFsBridgeContext {
instance,
conn,
transport: Some(transport),
hiprio_vq: Some(hiprio_vq),
request_pending: core::iter::repeat_with(VecDeque::new)
.take(request_vqs.len())
.collect(),
request_inflight: core::iter::repeat_with(BTreeMap::new)
.take(request_vqs.len())
.collect(),
request_vqs,
hiprio_pending: VecDeque::new(),
hiprio_inflight: BTreeMap::new(),
next_request_slot: 0,
req_buf: vec![0u8; VIRTIOFS_REQ_BUF_SIZE],
});
let raw = Box::into_raw(ctx);
if KernelThreadMechanism::create_and_run(
KernelThreadClosure::StaticUsizeClosure((
&(virtiofs_bridge_thread_entry as fn(usize) -> i32),
raw as usize,
)),
String::from("virtiofs-bridge"),
)
.is_none()
{
let mut ctx = unsafe { Box::from_raw(raw) };
ctx.finish();
return Err(SystemError::ENOMEM);
}
Ok(session_id)
}
/// Parsed mount parameters handed from `make_mount_data` to `make_fs`.
#[derive(Debug)]
struct VirtioFsMountData {
    // Mode bits of the root inode (octal `rootmode=` option).
    rootmode: u32,
    user_id: u32,
    group_id: u32,
    allow_other: bool,
    default_permissions: bool,
    // Always `Never` today; parse rejects anything else.
    dax_mode: VirtioFsDaxMode,
    // FUSE connection shared between the FuseFS and the bridge thread.
    conn: Arc<FuseConn>,
    instance: Arc<VirtioFsInstance>,
}
impl FileSystemMakerData for VirtioFsMountData {
    // Enables the downcast in `make_fs`.
    fn as_any(&self) -> &dyn core::any::Any {
        self
    }
}
/// The mountable virtio-fs filesystem: wraps an inner `FuseFS` and ties its
/// lifetime to a bridge session on a `VirtioFsInstance`.
#[derive(Debug)]
struct VirtioFsFs {
    // Inner FUSE filesystem that implements the actual VFS operations.
    inner: Arc<dyn FileSystem>,
    instance: Arc<VirtioFsInstance>,
    // Session started by `start_bridge`; awaited in `on_umount`.
    session_id: u64,
}
/// DAX mount mode (`dax=` option). Only `Never` is currently accepted;
/// the other variants exist so the option parses but is rejected cleanly.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum VirtioFsDaxMode {
    Never,
    Always,
    Inode,
}
impl VirtioFsFs {
    /// Parse a decimal u32 option value (`user_id`, `group_id`).
    fn parse_opt_u32_decimal(v: &str) -> Result<u32, SystemError> {
        v.parse::<u32>().map_err(|_| SystemError::EINVAL)
    }

    /// Parse an octal u32 option value (`rootmode`).
    fn parse_opt_u32_octal(v: &str) -> Result<u32, SystemError> {
        u32::from_str_radix(v, 8).map_err(|_| SystemError::EINVAL)
    }

    /// A bare flag, or any value other than "0", counts as enabled.
    fn parse_opt_bool_switch(v: &str) -> bool {
        v.is_empty() || v != "0"
    }

    /// `dax` with no value means `always`; unknown values are EINVAL.
    fn parse_dax_mode(v: &str) -> Result<VirtioFsDaxMode, SystemError> {
        if v.is_empty() {
            return Ok(VirtioFsDaxMode::Always);
        }
        match v {
            "always" => Ok(VirtioFsDaxMode::Always),
            "never" => Ok(VirtioFsDaxMode::Never),
            "inode" => Ok(VirtioFsDaxMode::Inode),
            _ => Err(SystemError::EINVAL),
        }
    }

    /// Parse the comma-separated mount option string.
    ///
    /// Returns `(rootmode, user_id, group_id, default_permissions,
    /// allow_other, dax_mode)`. uid/gid default to the calling process's
    /// fsuid/fsgid; rootmode defaults to a 0755 directory. Unknown keys are
    /// EINVAL. DAX is unsupported, so any mode other than `never` yields
    /// EOPNOTSUPP.
    fn parse_mount_options(
        raw: Option<&str>,
    ) -> Result<(u32, u32, u32, bool, bool, VirtioFsDaxMode), SystemError> {
        let pcb = ProcessManager::current_pcb();
        let cred = pcb.cred();
        let mut rootmode: Option<u32> = None;
        let mut user_id: Option<u32> = None;
        let mut group_id: Option<u32> = None;
        let mut default_permissions = true;
        let mut allow_other = true;
        let mut dax_mode = VirtioFsDaxMode::Never;
        for part in raw.unwrap_or("").split(',') {
            let part = part.trim();
            if part.is_empty() {
                continue;
            }
            // Options are either `key=value` or a bare `key`.
            let (k, v) = match part.split_once('=') {
                Some((k, v)) => (k.trim(), v.trim()),
                None => (part, ""),
            };
            match k {
                "rootmode" => rootmode = Some(Self::parse_opt_u32_octal(v)?),
                "user_id" => user_id = Some(Self::parse_opt_u32_decimal(v)?),
                "group_id" => group_id = Some(Self::parse_opt_u32_decimal(v)?),
                "default_permissions" => default_permissions = Self::parse_opt_bool_switch(v),
                "allow_other" => allow_other = Self::parse_opt_bool_switch(v),
                "dax" => dax_mode = Self::parse_dax_mode(v)?,
                _ => return Err(SystemError::EINVAL),
            }
        }
        if dax_mode != VirtioFsDaxMode::Never {
            return Err(SystemError::EOPNOTSUPP_OR_ENOTSUP);
        }
        Ok((
            rootmode.unwrap_or(0o040755),
            user_id.unwrap_or(cred.fsuid.data() as u32),
            group_id.unwrap_or(cred.fsgid.data() as u32),
            default_permissions,
            allow_other,
            dax_mode,
        ))
    }
}
impl FileSystem for VirtioFsFs {
    // All VFS operations delegate to the inner FuseFS; only `name`,
    // `as_any_ref` and `on_umount` differ.
    fn root_inode(&self) -> Arc<dyn IndexNode> {
        self.inner.root_inode()
    }

    fn info(&self) -> FsInfo {
        self.inner.info()
    }

    fn support_readahead(&self) -> bool {
        self.inner.support_readahead()
    }

    fn as_any_ref(&self) -> &dyn core::any::Any {
        self
    }

    fn name(&self) -> &str {
        "virtiofs"
    }

    fn super_block(&self) -> SuperBlock {
        self.inner.super_block()
    }

    fn statfs(&self, inode: &Arc<dyn IndexNode>) -> Result<SuperBlock, SystemError> {
        self.inner.statfs(inode)
    }

    fn permission_policy(&self) -> crate::filesystem::vfs::FsPermissionPolicy {
        self.inner.permission_policy()
    }

    fn on_umount(&self) {
        // Tear down the inner FUSE fs first, then block until the bridge
        // thread has returned the device transport to the instance.
        self.inner.on_umount();
        self.instance.wait_session_released(self.session_id);
    }
}
impl MountableFileSystem for VirtioFsFs {
    /// Validates mount options and resolves the virtio-fs device instance
    /// named by `source`, packaging everything a later `make_fs` needs.
    fn make_mount_data(
        raw_data: Option<&str>,
        source: &str,
    ) -> Result<Option<Arc<dyn FileSystemMakerData + 'static>>, SystemError> {
        if source.is_empty() {
            return Err(SystemError::EINVAL);
        }
        let (rootmode, user_id, group_id, default_permissions, allow_other, dax_mode) =
            Self::parse_mount_options(raw_data)?;
        // `source` names the virtio-fs tag; ENODEV when no matching device
        // instance was probed.
        let instance = virtio_fs_find_instance(source).ok_or(SystemError::ENODEV)?;
        // The FUSE connection buffer must fit inside both the request and the
        // response buffer, so size it by the smaller of the two.
        let buf_size = core::cmp::min(VIRTIOFS_REQ_BUF_SIZE, VIRTIOFS_RSP_BUF_SIZE);
        let conn = FuseConn::new_for_virtiofs(buf_size);
        let mount_data = VirtioFsMountData {
            rootmode,
            user_id,
            group_id,
            allow_other,
            default_permissions,
            dax_mode,
            conn,
            instance,
        };
        Ok(Some(Arc::new(mount_data)))
    }

    /// Builds the filesystem: instantiates the inner FUSE fs, then starts the
    /// bridge that shuttles FUSE traffic over the virtio-fs transport.
    fn make_fs(
        data: Option<&dyn FileSystemMakerData>,
    ) -> Result<Arc<dyn FileSystem + 'static>, SystemError> {
        let md = data
            .and_then(|d| d.as_any().downcast_ref::<VirtioFsMountData>())
            .ok_or(SystemError::EINVAL)?;
        // DAX is not supported; reject it before doing any setup work.
        if md.dax_mode != VirtioFsDaxMode::Never {
            return Err(SystemError::EOPNOTSUPP_OR_ENOTSUP);
        }
        let fuse_mount_data = FuseMountData {
            rootmode: md.rootmode,
            user_id: md.user_id,
            group_id: md.group_id,
            allow_other: md.allow_other,
            default_permissions: md.default_permissions,
            conn: md.conn.clone(),
        };
        let inner = <FuseFS as MountableFileSystem>::make_fs(Some(
            &fuse_mount_data as &dyn FileSystemMakerData,
        ))?;
        // If the bridge fails to start, undo the inner mount before bailing.
        let session_id = start_bridge(md.instance.clone(), md.conn.clone()).map_err(|e| {
            inner.on_umount();
            e
        })?;
        Ok(Arc::new(VirtioFsFs {
            inner,
            instance: md.instance.clone(),
            session_id,
        }))
    }
}
// Register "virtiofs" as a mountable filesystem type backed by VirtioFsFs.
register_mountable_fs!(VirtioFsFs, VIRTIOFSMAKER, "virtiofs");

View File

@ -10,6 +10,10 @@
#
# - AUTO_TEST: 自动测试选项
# - SYSCALL_TEST_DIR: 系统调用测试目录
# - DRAGONOS_VIRTIOFS_ENABLE: 是否启用 virtiofs(1=启用,默认 0)
# - DRAGONOS_VIRTIOFS_SOCKET: virtiofsd socket路径
# - DRAGONOS_VIRTIOFS_TAG: virtiofs 挂载tag
# - DRAGONOS_VIRTIOFS_ENV_FILE: virtiofs配置文件路径(默认 ${ROOT_PATH}/tools/virtiofs/env.sh)
#
check_dependencies()
@ -99,6 +103,10 @@ QEMU_DISK_IMAGE="../bin/${DISK_NAME}"
QEMU_EXT4_DISK_IMAGE="../bin/${EXT4_DISK_NAME}"
QEMU_FAT_DISK_IMAGE="../bin/${FAT_DISK_NAME}"
QEMU_MEMORY="2G"
DRAGONOS_VIRTIOFS_ENABLE=${DRAGONOS_VIRTIOFS_ENABLE:=0}
DRAGONOS_VIRTIOFS_SOCKET=${DRAGONOS_VIRTIOFS_SOCKET:=/tmp/dragonos-virtiofsd.sock}
DRAGONOS_VIRTIOFS_TAG=${DRAGONOS_VIRTIOFS_TAG:=hostshare}
DRAGONOS_VIRTIOFS_ENV_FILE=${DRAGONOS_VIRTIOFS_ENV_FILE:=}
# 检查必要的环境变量
if [ -z "${ROOT_PATH}" ]; then
@ -142,6 +150,9 @@ QEMU_DRIVE_ARGS=(-drive "id=disk,file=${QEMU_DISK_IMAGE},if=none,format=raw")
QEMU_ACCEL_ARGS=()
QEMU_DEVICE_ARGS=()
QEMU_DISPLAY_ARGS=()
QEMU_OBJECT_ARGS=()
QEMU_NUMA_ARGS=()
QEMU_CHARDEV_ARGS=()
QEMU_ARGS=()
# QEMU_ARGUMENT+=" -S "
@ -294,6 +305,43 @@ setup_kernel_init_program
# 从环境变量设置内核命令行参数
setup_kernel_cmdline_from_env
# Optionally wire a vhost-user-fs (virtiofs) device into the QEMU command line.
# Requires a running virtiofsd exposing a unix socket (see tools/virtiofs/).
if [ "${DRAGONOS_VIRTIOFS_ENABLE}" == "1" ]; then
    # virtiofs bring-up is only implemented for x86_64 so far.
    if [ "${ARCH}" != "x86_64" ]; then
        echo "[错误] virtiofs临时运行支持当前仅实现x86_64"
        exit 1
    fi
    if [ -z "${DRAGONOS_VIRTIOFS_ENV_FILE}" ]; then
        DRAGONOS_VIRTIOFS_ENV_FILE="${ROOT_PATH}/tools/virtiofs/env.sh"
    fi
    if [ -f "${DRAGONOS_VIRTIOFS_ENV_FILE}" ]; then
        # shellcheck source=/dev/null
        . "${DRAGONOS_VIRTIOFS_ENV_FILE}"
        # Only adopt SOCKET_PATH/VIRTIOFS_TAG from the env file when the
        # DRAGONOS_* variables are still at their defaults — an explicit
        # environment override always wins over the file.
        if [ "${DRAGONOS_VIRTIOFS_SOCKET}" = "/tmp/dragonos-virtiofsd.sock" ] && [ -n "${SOCKET_PATH:-}" ]; then
            DRAGONOS_VIRTIOFS_SOCKET="${SOCKET_PATH}"
        fi
        if [ "${DRAGONOS_VIRTIOFS_TAG}" = "hostshare" ] && [ -n "${VIRTIOFS_TAG:-}" ]; then
            DRAGONOS_VIRTIOFS_TAG="${VIRTIOFS_TAG}"
        fi
    fi
    # Fail fast when virtiofsd is not running (-S: socket file exists).
    if [ ! -S "${DRAGONOS_VIRTIOFS_SOCKET}" ]; then
        echo "[错误] 未检测到virtiofsd socket: ${DRAGONOS_VIRTIOFS_SOCKET}"
        echo "[提示] 请先在另一个终端启动: tools/virtiofs/start_virtiofsd.sh"
        exit 1
    fi
    echo "[INFO] 启用virtiofs: tag=${DRAGONOS_VIRTIOFS_TAG}, socket=${DRAGONOS_VIRTIOFS_SOCKET}"
    # vhost-user-fs requires guest RAM to be a shared memfd so virtiofsd can
    # map it; the NUMA node binds that backend as the machine's memory.
    QEMU_OBJECT_ARGS+=(
        -object "memory-backend-memfd,id=mem,size=${QEMU_MEMORY},share=on"
    )
    QEMU_NUMA_ARGS+=(-numa "node,memdev=mem")
    QEMU_CHARDEV_ARGS+=(-chardev "socket,id=char_virtiofs,path=${DRAGONOS_VIRTIOFS_SOCKET}")
    QEMU_DEVICE_ARGS+=(-device "vhost-user-fs-pci,chardev=char_virtiofs,tag=${DRAGONOS_VIRTIOFS_TAG}")
fi
if [ ${QEMU_NOGRAPHIC} == true ]; then
QEMU_SERIAL_ARGS=(-serial chardev:mux -monitor chardev:mux -chardev "stdio,id=mux,mux=on,signal=off,logfile=${QEMU_SERIAL_LOG_FILE}")
@ -349,6 +397,9 @@ QEMU_ARGS+=(
"${QEMU_MACHINE_ARGS[@]}"
"${QEMU_CPU_ARGS[@]}"
"${QEMU_RTC_ARGS[@]}"
"${QEMU_OBJECT_ARGS[@]}"
"${QEMU_NUMA_ARGS[@]}"
"${QEMU_CHARDEV_ARGS[@]}"
"${QEMU_SERIAL_ARGS[@]}"
"${QEMU_DRIVE_ARGS[@]}"
"${QEMU_DEVICE_ARGS[@]}"

1
tools/virtiofs/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
env.sh

View File

@ -0,0 +1,19 @@
#!/usr/bin/env bash
#
# virtiofs configuration template. Copy to env.sh and adjust as needed:
#   cp env.sh.example env.sh
# Host directory to share with the guest
export HOST_SHARE_DIR="${HOME}/dragonos-virtiofs-share"
# Tag used by `mount -t virtiofs` inside the guest
export VIRTIOFS_TAG="hostshare"
# Runtime directory and socket for virtiofsd
export RUNTIME_DIR="${HOME}/.dragonos-virtiofs"
export SOCKET_PATH="${RUNTIME_DIR}/virtiofsd.sock"
# Optional: explicit path to the virtiofsd binary; leave empty to auto-detect
export VIRTIOFSD_BIN=""
export VIRTIOFSD_CACHE="auto"
export VIRTIOFSD_EXTRA_ARGS=""

103
tools/virtiofs/start_virtiofsd.sh Executable file
View File

@ -0,0 +1,103 @@
#!/usr/bin/env bash
# Launches virtiofsd for DragonOS virtiofs testing, configured via env.sh.
set -euo pipefail
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
SCRIPT_PATH="${SCRIPT_DIR}/$(basename -- "${BASH_SOURCE[0]}")"
ENV_FILE="${SCRIPT_DIR}/env.sh"
# virtiofsd needs root; re-exec this script under sudo, preserving HOME so
# env.sh defaults (which expand ${HOME}) resolve to the invoking user's paths.
if [[ "${EUID}" -ne 0 ]]; then
    echo "virtiofsd 需要以 sudo 权限启动,正在尝试提权..."
    exec sudo HOME="${HOME}" bash "${SCRIPT_PATH}" "$@"
fi
if [[ ! -f "${ENV_FILE}" ]]; then
    echo "未找到 ${ENV_FILE}"
    echo "请先执行cp \"${SCRIPT_DIR}/env.sh.example\" \"${ENV_FILE}\" 并按需修改"
    exit 1
fi
# shellcheck source=/dev/null
source "${ENV_FILE}"
# Resolve the virtiofsd executable, in priority order:
#   1. explicit VIRTIOFSD_BIN from env.sh,
#   2. whatever is on PATH,
#   3. well-known distro install locations.
# Prints the path on stdout; returns 1 when nothing is found.
detect_virtiofsd_bin() {
    local found
    if [[ -n "${VIRTIOFSD_BIN:-}" ]]; then
        echo "${VIRTIOFSD_BIN}"
        return 0
    fi
    if found="$(command -v virtiofsd 2>/dev/null)"; then
        echo "${found}"
        return 0
    fi
    for found in "/usr/libexec/virtiofsd" "/usr/lib/qemu/virtiofsd"; do
        if [[ -x "${found}" ]]; then
            echo "${found}"
            return 0
        fi
    done
    return 1
}
# Locate the binary; `|| true` keeps set -e from aborting so we can print a
# friendly error instead.
VIRTIOFSD_PATH="$(detect_virtiofsd_bin || true)"
if [[ -z "${VIRTIOFSD_PATH}" ]]; then
    echo "找不到 virtiofsd请安装 qemu/virtiofsd 或在 env.sh 中设置 VIRTIOFSD_BIN"
    exit 1
fi
# Ensure the shared dir and runtime dir exist; remove any stale socket so
# virtiofsd can bind a fresh one.
mkdir -p "${HOST_SHARE_DIR}"
mkdir -p "${RUNTIME_DIR}"
rm -f "${SOCKET_PATH}"
echo "启动 virtiofsd:"
echo "  binary : ${VIRTIOFSD_PATH}"
echo "  shared : ${HOST_SHARE_DIR}"
echo "  socket : ${SOCKET_PATH}"
echo "  cache  : ${VIRTIOFSD_CACHE:-auto}"
echo
echo "保持此终端运行,不要关闭。"
# Assemble VIRTIOFSD_ARGS for whichever CLI dialect the detected binary
# speaks, probed from its --help output:
#   --shared-dir present  -> modern (Rust) virtiofsd flags,
#   "source=PATH" present -> legacy -o source=... style,
#   otherwise             -> positional shared dir as a best effort.
build_virtiofsd_args() {
    local help_text cache
    help_text="$("${VIRTIOFSD_PATH}" --help 2>&1 || true)"
    cache="${VIRTIOFSD_CACHE:-auto}"
    VIRTIOFSD_ARGS=("--socket-path=${SOCKET_PATH}")
    if grep -q -- "--shared-dir" <<<"${help_text}"; then
        VIRTIOFSD_ARGS+=("--shared-dir=${HOST_SHARE_DIR}")
        # Some builds take --cache=..., others only the -o form.
        if grep -q -- "--cache" <<<"${help_text}"; then
            VIRTIOFSD_ARGS+=("--cache=${cache}")
        else
            VIRTIOFSD_ARGS+=("-o" "cache=${cache}")
        fi
    elif grep -q "source=PATH" <<<"${help_text}"; then
        VIRTIOFSD_ARGS+=(
            "-o" "source=${HOST_SHARE_DIR}"
            "-o" "cache=${cache}"
        )
    else
        VIRTIOFSD_ARGS+=(
            "-o" "cache=${cache}"
            "${HOST_SHARE_DIR}"
        )
    fi
}
build_virtiofsd_args
# exec replaces this shell with virtiofsd so signals reach it directly.
# VIRTIOFSD_EXTRA_ARGS is deliberately left unquoted: it may hold several
# space-separated extra flags that must word-split.
if [[ -n "${VIRTIOFSD_EXTRA_ARGS:-}" ]]; then
    # shellcheck disable=SC2086
    exec "${VIRTIOFSD_PATH}" "${VIRTIOFSD_ARGS[@]}" ${VIRTIOFSD_EXTRA_ARGS}
else
    exec "${VIRTIOFSD_PATH}" "${VIRTIOFSD_ARGS[@]}"
fi