Skip to content

Commit 5eaafdf

Browse files
LuBaolu authored and joergroedel committed
iommu/vt-d: Convert global spinlock into per domain lock
Using a global device_domain_lock spinlock to protect per-domain device tracking lists is an inefficient way, especially considering this lock is also needed in the hot paths. This optimizes the locking mechanism by converting the global lock to per domain lock. On the other hand, as the device tracking lists are never accessed in any interrupt context, there is no need to disable interrupts while spinning. Replace irqsave variant with spinlock calls. Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com> Reviewed-by: Kevin Tian <kevin.tian@intel.com> Link: https://lore.kernel.org/r/20220706025524.2904370-12-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel <jroedel@suse.de>
1 parent 969aaef commit 5eaafdf

2 files changed

Lines changed: 22 additions & 21 deletions

File tree

drivers/iommu/intel/iommu.c

Lines changed: 21 additions & 21 deletions
Original file line number | Diff line number | Diff line change
@@ -310,7 +310,6 @@ static int iommu_skip_te_disable;
310310
#define IDENTMAP_GFX 2
311311
#define IDENTMAP_AZALIA 4
312312

313-
static DEFINE_SPINLOCK(device_domain_lock);
314313
const struct iommu_ops intel_iommu_ops;
315314

316315
static bool translation_pre_enabled(struct intel_iommu *iommu)
@@ -535,7 +534,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
535534
struct device_domain_info *info;
536535
int nid = NUMA_NO_NODE;
537536

538-
spin_lock(&device_domain_lock);
537+
spin_lock(&domain->lock);
539538
list_for_each_entry(info, &domain->devices, link) {
540539
/*
541540
* There could possibly be multiple device numa nodes as devices
@@ -547,7 +546,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
547546
if (nid != NUMA_NO_NODE)
548547
break;
549548
}
550-
spin_unlock(&device_domain_lock);
549+
spin_unlock(&domain->lock);
551550

552551
return nid;
553552
}
@@ -1378,15 +1377,15 @@ iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
13781377
if (!iommu->qi)
13791378
return NULL;
13801379

1381-
spin_lock(&device_domain_lock);
1380+
spin_lock(&domain->lock);
13821381
list_for_each_entry(info, &domain->devices, link) {
13831382
if (info->iommu == iommu && info->bus == bus &&
13841383
info->devfn == devfn) {
1385-
spin_unlock(&device_domain_lock);
1384+
spin_unlock(&domain->lock);
13861385
return info->ats_supported ? info : NULL;
13871386
}
13881387
}
1389-
spin_unlock(&device_domain_lock);
1388+
spin_unlock(&domain->lock);
13901389

13911390
return NULL;
13921391
}
@@ -1396,15 +1395,15 @@ static void domain_update_iotlb(struct dmar_domain *domain)
13961395
struct device_domain_info *info;
13971396
bool has_iotlb_device = false;
13981397

1399-
spin_lock(&device_domain_lock);
1398+
spin_lock(&domain->lock);
14001399
list_for_each_entry(info, &domain->devices, link) {
14011400
if (info->ats_enabled) {
14021401
has_iotlb_device = true;
14031402
break;
14041403
}
14051404
}
14061405
domain->has_iotlb_device = has_iotlb_device;
1407-
spin_unlock(&device_domain_lock);
1406+
spin_unlock(&domain->lock);
14081407
}
14091408

14101409
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
@@ -1500,10 +1499,10 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
15001499
if (!domain->has_iotlb_device)
15011500
return;
15021501

1503-
spin_lock(&device_domain_lock);
1502+
spin_lock(&domain->lock);
15041503
list_for_each_entry(info, &domain->devices, link)
15051504
__iommu_flush_dev_iotlb(info, addr, mask);
1506-
spin_unlock(&device_domain_lock);
1505+
spin_unlock(&domain->lock);
15071506
}
15081507

15091508
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
@@ -1763,6 +1762,7 @@ static struct dmar_domain *alloc_domain(unsigned int type)
17631762
domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
17641763
domain->has_iotlb_device = false;
17651764
INIT_LIST_HEAD(&domain->devices);
1765+
spin_lock_init(&domain->lock);
17661766

17671767
return domain;
17681768
}
@@ -2446,9 +2446,9 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
24462446
if (ret)
24472447
return ret;
24482448
info->domain = domain;
2449-
spin_lock(&device_domain_lock);
2449+
spin_lock(&domain->lock);
24502450
list_add(&info->link, &domain->devices);
2451-
spin_unlock(&device_domain_lock);
2451+
spin_unlock(&domain->lock);
24522452

24532453
/* PASID table is mandatory for a PCI device in scalable mode. */
24542454
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
@@ -4123,6 +4123,7 @@ static void domain_context_clear(struct device_domain_info *info)
41234123
static void dmar_remove_one_dev_info(struct device *dev)
41244124
{
41254125
struct device_domain_info *info = dev_iommu_priv_get(dev);
4126+
struct dmar_domain *domain = info->domain;
41264127
struct intel_iommu *iommu = info->iommu;
41274128

41284129
if (!dev_is_real_dma_subdevice(info->dev)) {
@@ -4135,11 +4136,11 @@ static void dmar_remove_one_dev_info(struct device *dev)
41354136
intel_pasid_free_table(info->dev);
41364137
}
41374138

4138-
spin_lock(&device_domain_lock);
4139+
spin_lock(&domain->lock);
41394140
list_del(&info->link);
4140-
spin_unlock(&device_domain_lock);
4141+
spin_unlock(&domain->lock);
41414142

4142-
domain_detach_iommu(info->domain, iommu);
4143+
domain_detach_iommu(domain, iommu);
41434144
info->domain = NULL;
41444145
}
41454146

@@ -4422,7 +4423,7 @@ static bool domain_support_force_snooping(struct dmar_domain *domain)
44224423
struct device_domain_info *info;
44234424
bool support = true;
44244425

4425-
assert_spin_locked(&device_domain_lock);
4426+
assert_spin_locked(&domain->lock);
44264427
list_for_each_entry(info, &domain->devices, link) {
44274428
if (!ecap_sc_support(info->iommu->ecap)) {
44284429
support = false;
@@ -4437,8 +4438,7 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
44374438
{
44384439
struct device_domain_info *info;
44394440

4440-
assert_spin_locked(&device_domain_lock);
4441-
4441+
assert_spin_locked(&domain->lock);
44424442
/*
44434443
* Second level page table supports per-PTE snoop control. The
44444444
* iommu_map() interface will handle this by setting SNP bit.
@@ -4460,15 +4460,15 @@ static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
44604460
if (dmar_domain->force_snooping)
44614461
return true;
44624462

4463-
spin_lock(&device_domain_lock);
4463+
spin_lock(&dmar_domain->lock);
44644464
if (!domain_support_force_snooping(dmar_domain)) {
4465-
spin_unlock(&device_domain_lock);
4465+
spin_unlock(&dmar_domain->lock);
44664466
return false;
44674467
}
44684468

44694469
domain_set_force_snooping(dmar_domain);
44704470
dmar_domain->force_snooping = true;
4471-
spin_unlock(&device_domain_lock);
4471+
spin_unlock(&dmar_domain->lock);
44724472

44734473
return true;
44744474
}

drivers/iommu/intel/iommu.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -541,6 +541,7 @@ struct dmar_domain {
541541
u8 force_snooping : 1; /* Create IOPTEs with snoop control */
542542
u8 set_pte_snp:1;
543543

544+
spinlock_t lock; /* Protect device tracking lists */
544545
struct list_head devices; /* all devices' list */
545546

546547
struct dma_pte *pgd; /* virtual address */

0 commit comments

Comments (0)