
Commit f6f3721

LuBaolu authored and jgunthorpe committed
iommu/vt-d: Add iotlb flush for nested domain
This implements the .cache_invalidate_user() callback to support iotlb flush for nested domain.

Link: https://lore.kernel.org/r/20240111041015.47920-9-yi.l.liu@intel.com
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Co-developed-by: Yi Liu <yi.l.liu@intel.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
1 parent 393a577 commit f6f3721

1 file changed

File tree

drivers/iommu/intel/nested.c

Lines changed: 88 additions & 0 deletions
@@ -73,9 +73,97 @@ static void intel_nested_domain_free(struct iommu_domain *domain)
 	kfree(to_dmar_domain(domain));
 }
 
+static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
+				   unsigned int mask)
+{
+	struct device_domain_info *info;
+	unsigned long flags;
+	u16 sid, qdep;
+
+	spin_lock_irqsave(&domain->lock, flags);
+	list_for_each_entry(info, &domain->devices, link) {
+		if (!info->ats_enabled)
+			continue;
+		sid = info->bus << 8 | info->devfn;
+		qdep = info->ats_qdep;
+		qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+				   qdep, addr, mask);
+		quirk_extra_dev_tlb_flush(info, addr, mask,
+					  IOMMU_NO_PASID, qdep);
+	}
+	spin_unlock_irqrestore(&domain->lock, flags);
+}
+
+static void intel_nested_flush_cache(struct dmar_domain *domain, u64 addr,
+				     unsigned long npages, bool ih)
+{
+	struct iommu_domain_info *info;
+	unsigned int mask;
+	unsigned long i;
+
+	xa_for_each(&domain->iommu_array, i, info)
+		qi_flush_piotlb(info->iommu,
+				domain_id_iommu(domain, info->iommu),
+				IOMMU_NO_PASID, addr, npages, ih);
+
+	if (!domain->has_iotlb_device)
+		return;
+
+	if (npages == U64_MAX)
+		mask = 64 - VTD_PAGE_SHIFT;
+	else
+		mask = ilog2(__roundup_pow_of_two(npages));
+
+	nested_flush_dev_iotlb(domain, addr, mask);
+}
+
+static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
+					      struct iommu_user_data_array *array)
+{
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+	struct iommu_hwpt_vtd_s1_invalidate inv_entry;
+	u32 index, processed = 0;
+	int ret = 0;
+
+	if (array->type != IOMMU_HWPT_INVALIDATE_DATA_VTD_S1) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	for (index = 0; index < array->entry_num; index++) {
+		ret = iommu_copy_struct_from_user_array(&inv_entry, array,
+							IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
+							index, __reserved);
+		if (ret)
+			break;
+
+		if ((inv_entry.flags & ~IOMMU_VTD_INV_FLAGS_LEAF) ||
+		    inv_entry.__reserved) {
+			ret = -EOPNOTSUPP;
+			break;
+		}
+
+		if (!IS_ALIGNED(inv_entry.addr, VTD_PAGE_SIZE) ||
+		    ((inv_entry.npages == U64_MAX) && inv_entry.addr)) {
+			ret = -EINVAL;
+			break;
+		}
+
+		intel_nested_flush_cache(dmar_domain, inv_entry.addr,
+					 inv_entry.npages,
+					 inv_entry.flags & IOMMU_VTD_INV_FLAGS_LEAF);
+		processed++;
+	}
+
+out:
+	array->entry_num = processed;
+	return ret;
+}
+
 static const struct iommu_domain_ops intel_nested_domain_ops = {
 	.attach_dev		= intel_nested_attach_dev,
 	.free			= intel_nested_domain_free,
+	.cache_invalidate_user	= intel_nested_cache_invalidate_user,
 };
 
 struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
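
For context, a hedged sketch of how userspace would reach this new callback: the nested (stage-1) hwpt is invalidated through iommufd's IOMMU_HWPT_INVALIDATE ioctl, whose VT-d payload is the struct iommu_hwpt_vtd_s1_invalidate parsed above. The helper name invalidate_s1_range and the iommufd_fd/nested_hwpt_id parameters are illustrative assumptions (they would come from opening /dev/iommu and an earlier IOMMU_HWPT_ALLOC), not part of this commit.

/*
 * Illustrative userspace sketch (not from this commit): issue one
 * stage-1 invalidation entry against a nested hwpt. The kernel path
 * ends in intel_nested_cache_invalidate_user() shown in the diff.
 */
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int invalidate_s1_range(int iommufd_fd, uint32_t nested_hwpt_id,
                               uint64_t iova, uint64_t npages)
{
        struct iommu_hwpt_vtd_s1_invalidate inv = {
                .addr = iova,       /* must be VTD_PAGE_SIZE aligned */
                .npages = npages,   /* U64_MAX (with addr == 0) flushes everything */
                .flags = 0,         /* optionally IOMMU_VTD_INV_FLAGS_LEAF */
        };
        struct iommu_hwpt_invalidate cmd = {
                .size = sizeof(cmd),
                .hwpt_id = nested_hwpt_id,
                .data_uptr = (uintptr_t)&inv,
                .data_type = IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
                .entry_len = sizeof(inv),
                .entry_num = 1,
        };

        if (ioctl(iommufd_fd, IOMMU_HWPT_INVALIDATE, &cmd))
                return -errno;  /* cmd.entry_num reports how many entries were processed */
        return 0;
}

On failure the kernel writes back the number of entries it actually consumed (array->entry_num = processed in the diff), which is why the sketch leaves cmd.entry_num available to the caller after the ioctl returns.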
