Skip to content

Commit 8a76042

Browse files
committed
feat(virtq): add recycle pool tests
Signed-off-by: Tomasz Andrzejak <andreiltd@gmail.com>
1 parent 0905a42 commit 8a76042

2 files changed

Lines changed: 151 additions & 7 deletions

File tree

src/hyperlight_common/src/virtq/buffer.rs

Lines changed: 10 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -103,7 +103,7 @@ impl<T: BufferProvider> BufferProvider for Arc<T> {
103103
/// zero-copy `Bytes` backed by shared memory.
104104
///
105105
/// When dropped, the allocation is returned to the pool.
106-
#[derive(Debug, Clone)]
106+
#[derive(Debug)]
107107
pub struct BufferOwner<P: BufferProvider, M: MemOps> {
108108
pub(crate) pool: P,
109109
pub(crate) mem: M,
@@ -140,9 +140,10 @@ impl<F: FnOnce(Allocation)> AllocGuard<F> {
140140

141141
pub fn release(mut self) -> Allocation {
142142
// Safety: AllocGuard is always constructed with Some, and release is only called once
143-
self.0.take().map(|(alloc, _)| alloc).unwrap_or_else(|| {
144-
unreachable!("AllocGuard::release called on dismissed guard")
145-
})
143+
self.0
144+
.take()
145+
.map(|(alloc, _)| alloc)
146+
.unwrap_or_else(|| unreachable!("AllocGuard::release called on dismissed guard"))
146147
}
147148
}
148149

@@ -152,9 +153,11 @@ impl<F: FnOnce(Allocation)> core::ops::Deref for AllocGuard<F> {
152153
fn deref(&self) -> &Allocation {
153154
// Safety: AllocGuard is always constructed with Some, and the inner value is only
154155
// taken by release() or Drop.
155-
&self.0.as_ref().unwrap_or_else(|| {
156-
unreachable!("AllocGuard::deref called on dismissed guard")
157-
}).0
156+
&self
157+
.0
158+
.as_ref()
159+
.unwrap_or_else(|| unreachable!("AllocGuard::deref called on dismissed guard"))
160+
.0
158161
}
159162
}
160163

src/hyperlight_common/src/virtq/pool.rs

Lines changed: 141 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -680,6 +680,20 @@ impl BufferProvider for RecyclePool {
680680

681681
fn dealloc(&self, alloc: Allocation) -> Result<(), AllocError> {
682682
let mut inner = self.inner.borrow_mut();
683+
let end = inner.base_addr + (inner.count * inner.slot_size) as u64;
684+
685+
if alloc.addr < inner.base_addr || alloc.addr >= end {
686+
return Err(AllocError::InvalidFree(alloc.addr, alloc.len));
687+
}
688+
689+
if (alloc.addr - inner.base_addr) % inner.slot_size as u64 != 0 {
690+
return Err(AllocError::InvalidFree(alloc.addr, alloc.len));
691+
}
692+
693+
if inner.free.contains(&alloc.addr) {
694+
return Err(AllocError::InvalidFree(alloc.addr, alloc.len));
695+
}
696+
683697
inner.free.push(alloc.addr);
684698
Ok(())
685699
}
@@ -1389,6 +1403,133 @@ mod tests {
13891403
pool.restore_allocated(&[0x80000]).unwrap();
13901404
assert_eq!(pool.num_free(), 3);
13911405
}
1406+
1407+
#[test]
1408+
fn test_recycle_pool_dealloc_out_of_range() {
1409+
let pool = make_recycle_pool(4, 4096);
1410+
let _ = pool.alloc(4096).unwrap();
1411+
1412+
let bogus = Allocation {
1413+
addr: 0xDEAD,
1414+
len: 4096,
1415+
};
1416+
assert!(matches!(
1417+
pool.dealloc(bogus),
1418+
Err(AllocError::InvalidFree(0xDEAD, 4096))
1419+
));
1420+
}
1421+
1422+
#[test]
1423+
fn test_recycle_pool_dealloc_misaligned() {
1424+
let pool = make_recycle_pool(4, 4096);
1425+
let _ = pool.alloc(4096).unwrap();
1426+
1427+
let misaligned = Allocation {
1428+
addr: 0x80001,
1429+
len: 4096,
1430+
};
1431+
assert!(matches!(
1432+
pool.dealloc(misaligned),
1433+
Err(AllocError::InvalidFree(0x80001, 4096))
1434+
));
1435+
}
1436+
1437+
#[test]
1438+
fn test_recycle_pool_dealloc_double_free() {
1439+
let pool = make_recycle_pool(4, 4096);
1440+
let a = pool.alloc(4096).unwrap();
1441+
pool.dealloc(a).unwrap();
1442+
1443+
// Second dealloc should fail - address is already in the free list
1444+
assert!(matches!(
1445+
pool.dealloc(a),
1446+
Err(AllocError::InvalidFree(_, _))
1447+
));
1448+
}
1449+
1450+
#[test]
1451+
fn test_recycle_pool_random_order_dealloc() {
1452+
let pool = make_recycle_pool(8, 4096);
1453+
1454+
let mut allocs: Vec<Allocation> = (0..8).map(|_| pool.alloc(4096).unwrap()).collect();
1455+
assert_eq!(pool.num_free(), 0);
1456+
1457+
// Dealloc in reverse order
1458+
allocs.reverse();
1459+
for a in &allocs {
1460+
pool.dealloc(*a).unwrap();
1461+
}
1462+
assert_eq!(pool.num_free(), 8);
1463+
1464+
// All slots should be re-allocatable
1465+
let reallocs: Vec<Allocation> = (0..8).map(|_| pool.alloc(4096).unwrap()).collect();
1466+
assert_eq!(pool.num_free(), 0);
1467+
1468+
// Verify all addresses are distinct
1469+
let mut addrs: Vec<u64> = reallocs.iter().map(|a| a.addr).collect();
1470+
addrs.sort();
1471+
addrs.dedup();
1472+
assert_eq!(addrs.len(), 8);
1473+
}
1474+
1475+
#[test]
1476+
fn test_recycle_pool_interleaved_alloc_dealloc_order() {
1477+
let pool = make_recycle_pool(4, 4096);
1478+
1479+
let a0 = pool.alloc(4096).unwrap();
1480+
let a1 = pool.alloc(4096).unwrap();
1481+
let a2 = pool.alloc(4096).unwrap();
1482+
let a3 = pool.alloc(4096).unwrap();
1483+
assert_eq!(pool.num_free(), 0);
1484+
1485+
// Free middle slots first (out of allocation order)
1486+
pool.dealloc(a2).unwrap();
1487+
pool.dealloc(a0).unwrap();
1488+
assert_eq!(pool.num_free(), 2);
1489+
1490+
// Re-alloc gets the out-of-order slots back (LIFO)
1491+
let b0 = pool.alloc(4096).unwrap();
1492+
assert_eq!(b0.addr, a0.addr);
1493+
let b1 = pool.alloc(4096).unwrap();
1494+
assert_eq!(b1.addr, a2.addr);
1495+
1496+
// Free everything in yet another order
1497+
pool.dealloc(a1).unwrap();
1498+
pool.dealloc(b0).unwrap();
1499+
pool.dealloc(b1).unwrap();
1500+
pool.dealloc(a3).unwrap();
1501+
assert_eq!(pool.num_free(), 4);
1502+
1503+
// All 4 original addresses should be available
1504+
let mut final_addrs: Vec<u64> = (0..4).map(|_| pool.alloc(4096).unwrap().addr).collect();
1505+
final_addrs.sort();
1506+
let expected: Vec<u64> = (0..4).map(|i| 0x80000 + i * 4096).collect();
1507+
assert_eq!(final_addrs, expected);
1508+
}
1509+
1510+
#[test]
1511+
fn test_recycle_pool_dealloc_order_independent_of_alloc_order() {
1512+
let pool = make_recycle_pool(6, 256);
1513+
1514+
// Allocate all
1515+
let allocs: Vec<Allocation> = (0..6).map(|_| pool.alloc(256).unwrap()).collect();
1516+
1517+
// Dealloc in scattered order: 4, 1, 5, 0, 3, 2
1518+
let order = [4, 1, 5, 0, 3, 2];
1519+
for &i in &order {
1520+
pool.dealloc(allocs[i]).unwrap();
1521+
}
1522+
assert_eq!(pool.num_free(), 6);
1523+
1524+
// Re-allocate all and verify we get back the full set
1525+
let mut realloc_addrs: Vec<u64> = (0..6).map(|_| pool.alloc(256).unwrap().addr).collect();
1526+
realloc_addrs.sort();
1527+
1528+
let mut orig_addrs: Vec<u64> = allocs.iter().map(|a| a.addr).collect();
1529+
orig_addrs.sort();
1530+
1531+
assert_eq!(realloc_addrs, orig_addrs);
1532+
}
13921533
}
13931534

13941535
#[cfg(test)]

0 commit comments

Comments (0)