|
50 | 50 | VFIO_LOG_AND_EXIT(_fmt, ##__VA_ARGS__); \ |
51 | 51 | } while (0) |
52 | 52 |
|
/*
 * Issue ioctl(_fd, _op, _arg) and assert that it returned 0.
 *
 * _arg is evaluated exactly once (via the __arg temporary), so callers may
 * pass expressions with side effects.  NOTE(review): __arg is a void *, so
 * this macro only supports pointer-typed third arguments, not the plain
 * integer arguments some ioctls take — confirm against call sites.  On
 * failure the message stringifies all three macro arguments for debugging.
 */
#define ioctl_assert(_fd, _op, _arg) do { \
	void *__arg = (_arg); \
	int __ret = ioctl((_fd), (_op), (__arg)); \
	VFIO_ASSERT_EQ(__ret, 0, "ioctl(%s, %s, %s) returned %d\n", #_fd, #_op, #_arg, __ret); \
} while (0)
/* printf-style logging helpers, prefixed with the device's BDF string. */
#define dev_info(_dev, _fmt, ...) printf("%s: " _fmt, (_dev)->bdf, ##__VA_ARGS__)
#define dev_err(_dev, _fmt, ...) fprintf(stderr, "%s: " _fmt, (_dev)->bdf, ##__VA_ARGS__)
55 | 61 |
|
@@ -223,37 +229,78 @@ extern const char *default_iommu_mode; |
/* Create/destroy the IOMMU backend selected by iommu_mode. */
struct iommu *iommu_init(const char *iommu_mode);
void iommu_cleanup(struct iommu *iommu);
225 | 231 |
|
/* Map region into the IOMMU; returns 0 on success, non-zero on failure. */
int __iommu_map(struct iommu *iommu, struct dma_region *region);

/* Map region into the IOMMU; asserts (aborts the test) on failure. */
static inline void iommu_map(struct iommu *iommu, struct dma_region *region)
{
	VFIO_ASSERT_EQ(__iommu_map(iommu, region), 0);
}
| 238 | + |
/*
 * Unmap region from the IOMMU; returns 0 on success.  If unmapped is
 * non-NULL it is written by the implementation — presumably the number of
 * bytes actually unmapped; confirm in the .c file.
 */
int __iommu_unmap(struct iommu *iommu, struct dma_region *region, u64 *unmapped);

/* Unmap region; asserts on failure.  The unmapped byte count is discarded. */
static inline void iommu_unmap(struct iommu *iommu, struct dma_region *region)
{
	VFIO_ASSERT_EQ(__iommu_unmap(iommu, region, NULL), 0);
}
| 245 | + |
/*
 * Unmap every mapping in the IOMMU; returns 0 on success.  If unmapped is
 * non-NULL it is written by the implementation — presumably the total bytes
 * unmapped; confirm in the .c file.
 */
int __iommu_unmap_all(struct iommu *iommu, u64 *unmapped);

/* Unmap everything; asserts on failure.  The unmapped byte count is discarded. */
static inline void iommu_unmap_all(struct iommu *iommu)
{
	VFIO_ASSERT_EQ(__iommu_unmap_all(iommu, NULL), 0);
}
| 252 | + |
/*
 * Translate a host virtual address into the IOVA it is mapped at.
 * NOTE(review): the __ variant presumably reports "not mapped" in-band
 * (e.g. a sentinel IOVA) while the plain variant asserts — confirm in the
 * implementation.
 */
iova_t __iommu_hva2iova(struct iommu *iommu, void *vaddr);
iova_t iommu_hva2iova(struct iommu *iommu, void *vaddr);

/* Return the usable IOVA ranges for this IOMMU; *nranges gets the count. */
struct iommu_iova_range *iommu_iova_ranges(struct iommu *iommu, u32 *nranges);
| 257 | + |
/* Open the VFIO PCI device identified by bdf and attach it to iommu. */
struct vfio_pci_device *vfio_pci_device_init(const char *bdf, struct iommu *iommu);
void vfio_pci_device_cleanup(struct vfio_pci_device *device);

/* Reset the PCI device. */
void vfio_pci_device_reset(struct vfio_pci_device *device);
230 | 262 |
|
231 | | -struct iommu_iova_range *vfio_pci_iova_ranges(struct vfio_pci_device *device, |
232 | | - u32 *nranges); |
| 263 | +static inline struct iommu_iova_range *vfio_pci_iova_ranges(struct vfio_pci_device *device, |
| 264 | + u32 *nranges) |
| 265 | +{ |
| 266 | + return iommu_iova_ranges(device->iommu, nranges); |
| 267 | +} |
233 | 268 |
|
/* Simple IOVA allocator handing out addresses for the device's IOMMU. */
struct iova_allocator *iova_allocator_init(struct vfio_pci_device *device);
void iova_allocator_cleanup(struct iova_allocator *allocator);
/* Allocate an IOVA span of size bytes; failure behavior — see implementation. */
iova_t iova_allocator_alloc(struct iova_allocator *allocator, size_t size);
237 | 272 |
|
238 | | -int __vfio_pci_dma_map(struct vfio_pci_device *device, |
239 | | - struct dma_region *region); |
240 | | -int __vfio_pci_dma_unmap(struct vfio_pci_device *device, |
241 | | - struct dma_region *region, |
242 | | - u64 *unmapped); |
243 | | -int __vfio_pci_dma_unmap_all(struct vfio_pci_device *device, u64 *unmapped); |
| 273 | +static inline int __vfio_pci_dma_map(struct vfio_pci_device *device, |
| 274 | + struct dma_region *region) |
| 275 | +{ |
| 276 | + return __iommu_map(device->iommu, region); |
| 277 | +} |
244 | 278 |
|
/* DMA-map region for this device; asserts (aborts the test) on failure. */
static inline void vfio_pci_dma_map(struct vfio_pci_device *device,
				    struct dma_region *region)
{
	VFIO_ASSERT_EQ(__vfio_pci_dma_map(device, region), 0);
}
250 | 284 |
|
| 285 | +static inline int __vfio_pci_dma_unmap(struct vfio_pci_device *device, |
| 286 | + struct dma_region *region, |
| 287 | + u64 *unmapped) |
| 288 | +{ |
| 289 | + return __iommu_unmap(device->iommu, region, unmapped); |
| 290 | +} |
| 291 | + |
/* DMA-unmap region; asserts on failure.  The unmapped size is discarded. */
static inline void vfio_pci_dma_unmap(struct vfio_pci_device *device,
				      struct dma_region *region)
{
	VFIO_ASSERT_EQ(__vfio_pci_dma_unmap(device, region, NULL), 0);
}
256 | 297 |
|
| 298 | +static inline int __vfio_pci_dma_unmap_all(struct vfio_pci_device *device, |
| 299 | + u64 *unmapped) |
| 300 | +{ |
| 301 | + return __iommu_unmap_all(device->iommu, unmapped); |
| 302 | +} |
| 303 | + |
257 | 304 | static inline void vfio_pci_dma_unmap_all(struct vfio_pci_device *device) |
258 | 305 | { |
259 | 306 | VFIO_ASSERT_EQ(__vfio_pci_dma_unmap_all(device, NULL), 0); |
@@ -319,8 +366,15 @@ static inline void vfio_pci_msix_disable(struct vfio_pci_device *device) |
319 | 366 | vfio_pci_irq_disable(device, VFIO_PCI_MSIX_IRQ_INDEX); |
320 | 367 | } |
321 | 368 |
|
322 | | -iova_t __to_iova(struct vfio_pci_device *device, void *vaddr); |
323 | | -iova_t to_iova(struct vfio_pci_device *device, void *vaddr); |
| 369 | +static inline iova_t __to_iova(struct vfio_pci_device *device, void *vaddr) |
| 370 | +{ |
| 371 | + return __iommu_hva2iova(device->iommu, vaddr); |
| 372 | +} |
| 373 | + |
| 374 | +static inline iova_t to_iova(struct vfio_pci_device *device, void *vaddr) |
| 375 | +{ |
| 376 | + return iommu_hva2iova(device->iommu, vaddr); |
| 377 | +} |
324 | 378 |
|
325 | 379 | static inline bool vfio_pci_device_match(struct vfio_pci_device *device, |
326 | 380 | u16 vendor_id, u16 device_id) |
|
0 commit comments