|
| 1 | +/* |
| 2 | +Copyright 2026 The Hyperlight Authors. |
| 3 | +
|
| 4 | +Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | +you may not use this file except in compliance with the License. |
| 6 | +You may obtain a copy of the License at |
| 7 | +
|
| 8 | + http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | +
|
| 10 | +Unless required by applicable law or agreed to in writing, software |
| 11 | +distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | +See the License for the specific language governing permissions and |
| 14 | +limitations under the License. |
| 15 | +*/ |
| 16 | + |
| 17 | +//! Host-side [`MemOps`] implementation for virtqueue access. |
| 18 | +//! |
| 19 | +//! Translates guest virtual addresses used in virtqueue descriptors |
| 20 | +//! to offsets into the scratch [`HostSharedMemory`], reusing its |
| 21 | +//! volatile access and locking patterns. |
| 22 | +
|
| 23 | +use core::sync::atomic::{AtomicU16, Ordering}; |
| 24 | + |
| 25 | +use hyperlight_common::virtq::MemOps; |
| 26 | + |
| 27 | +use super::shared_mem::{HostSharedMemory, SharedMemory}; |
| 28 | + |
/// Error type for host memory operations.
#[derive(Debug, thiserror::Error)]
pub enum HostMemError {
    /// A guest virtual address (or `addr + len`) fell outside the scratch region.
    #[error("address {addr:#x} out of bounds scratch_size={scratch_size}")]
    OutOfBounds { addr: u64, scratch_size: usize },
    /// An underlying shared-memory operation failed; the source error is
    /// carried as a string so this type needs no dependency on its error type.
    #[error("shared memory error: {0}")]
    SharedMem(String),
    /// Direct slice access is intentionally unsupported on the host side;
    /// callers must go through `read`/`write` instead.
    #[error("as_slice/as_mut_slice not supported on host")]
    DirectSliceNotSupported,
}
| 39 | + |
/// Host-side memory accessor for virtqueue operations.
///
/// Owns a clone of the scratch [`HostSharedMemory`] and translates
/// guest virtual addresses (in the scratch region) to offsets for the
/// existing volatile read/write methods.
///
/// `Clone` is derived so each virtqueue/device can hold its own handle;
/// all clones refer to the same underlying scratch mapping.
#[derive(Clone)]
pub(crate) struct HostMemOps {
    /// Cloned handle to the scratch shared memory
    scratch: HostSharedMemory,
    /// The guest virtual address that corresponds to scratch offset 0.
    scratch_base_gva: u64,
}
| 52 | + |
| 53 | +impl HostMemOps { |
| 54 | + /// Create a new `HostMemOps` backed by shared memory. |
| 55 | + pub(crate) fn new(scratch: &HostSharedMemory, scratch_base_gva: u64) -> Self { |
| 56 | + Self { |
| 57 | + scratch: scratch.clone(), |
| 58 | + scratch_base_gva, |
| 59 | + } |
| 60 | + } |
| 61 | + |
| 62 | + /// Translate a guest virtual address to a scratch offset. |
| 63 | + fn to_offset(&self, addr: u64) -> Result<usize, HostMemError> { |
| 64 | + addr.checked_sub(self.scratch_base_gva) |
| 65 | + .map(|o| o as usize) |
| 66 | + .ok_or(HostMemError::OutOfBounds { |
| 67 | + addr, |
| 68 | + scratch_size: self.scratch.mem_size(), |
| 69 | + }) |
| 70 | + } |
| 71 | + |
| 72 | + /// Get a raw pointer into scratch memory at the given guest address. |
| 73 | + fn raw_ptr(&self, addr: u64, len: usize) -> Result<*mut u8, HostMemError> { |
| 74 | + let offset = self.to_offset(addr)?; |
| 75 | + let scratch_size = self.scratch.mem_size(); |
| 76 | + |
| 77 | + if offset.checked_add(len).is_none_or(|end| end > scratch_size) { |
| 78 | + return Err(HostMemError::OutOfBounds { addr, scratch_size }); |
| 79 | + } |
| 80 | + |
| 81 | + Ok(self.scratch.base_ptr().wrapping_add(offset)) |
| 82 | + } |
| 83 | +} |
| 84 | + |
| 85 | +impl MemOps for HostMemOps { |
| 86 | + type Error = HostMemError; |
| 87 | + |
| 88 | + fn read(&self, addr: u64, dst: &mut [u8]) -> Result<usize, Self::Error> { |
| 89 | + let offset = self.to_offset(addr)?; |
| 90 | + self.scratch |
| 91 | + .copy_to_slice(dst, offset) |
| 92 | + .map_err(|e| HostMemError::SharedMem(e.to_string()))?; |
| 93 | + Ok(dst.len()) |
| 94 | + } |
| 95 | + |
| 96 | + fn write(&self, addr: u64, src: &[u8]) -> Result<usize, Self::Error> { |
| 97 | + let offset = self.to_offset(addr)?; |
| 98 | + self.scratch |
| 99 | + .copy_from_slice(src, offset) |
| 100 | + .map_err(|e| HostMemError::SharedMem(e.to_string()))?; |
| 101 | + Ok(src.len()) |
| 102 | + } |
| 103 | + |
| 104 | + fn load_acquire(&self, addr: u64) -> Result<u16, Self::Error> { |
| 105 | + let ptr = self.raw_ptr(addr, core::mem::size_of::<u16>())?; |
| 106 | + let atomic = unsafe { &*(ptr as *const AtomicU16) }; |
| 107 | + Ok(atomic.load(Ordering::Acquire)) |
| 108 | + } |
| 109 | + |
| 110 | + fn store_release(&self, addr: u64, val: u16) -> Result<(), Self::Error> { |
| 111 | + let ptr = self.raw_ptr(addr, core::mem::size_of::<u16>())?; |
| 112 | + let atomic = unsafe { &*(ptr as *const AtomicU16) }; |
| 113 | + atomic.store(val, Ordering::Release); |
| 114 | + Ok(()) |
| 115 | + } |
| 116 | + |
| 117 | + unsafe fn as_slice(&self, _addr: u64, _len: usize) -> Result<&[u8], Self::Error> { |
| 118 | + Err(HostMemError::DirectSliceNotSupported) |
| 119 | + } |
| 120 | + |
| 121 | + unsafe fn as_mut_slice(&self, _addr: u64, _len: usize) -> Result<&mut [u8], Self::Error> { |
| 122 | + Err(HostMemError::DirectSliceNotSupported) |
| 123 | + } |
| 124 | +} |
0 commit comments