Skip to content

Commit 9906b93

Browse files
Longpeng(Mike) authored and joergroedel committed
iommu/vt-d: Avoid duplicate removing in __domain_mapping()
The __domain_mapping() always removes the pages in the range from 'iov_pfn'
to 'end_pfn', but the 'end_pfn' is always the last pfn of the range that the
caller wants to map.

This would introduce too many duplicated removing and leads the map
operation take too long, for example:

  Map iova=0x100000,nr_pages=0x7d61800
    iov_pfn: 0x100000, end_pfn: 0x7e617ff
    iov_pfn: 0x140000, end_pfn: 0x7e617ff
    iov_pfn: 0x180000, end_pfn: 0x7e617ff
    iov_pfn: 0x1c0000, end_pfn: 0x7e617ff
    iov_pfn: 0x200000, end_pfn: 0x7e617ff
    ...
  it takes about 50ms in total.

We can reduce the cost by recalculating the 'end_pfn' and limiting it to
the boundary of the end of this pte page:

  Map iova=0x100000,nr_pages=0x7d61800
    iov_pfn: 0x100000, end_pfn: 0x13ffff
    iov_pfn: 0x140000, end_pfn: 0x17ffff
    iov_pfn: 0x180000, end_pfn: 0x1bffff
    iov_pfn: 0x1c0000, end_pfn: 0x1fffff
    iov_pfn: 0x200000, end_pfn: 0x23ffff
    ...
  it only needs 9ms now.

This also removes a meaningless BUG_ON() in __domain_mapping().

Signed-off-by: Longpeng(Mike) <longpeng2@huawei.com>
Tested-by: Liujunjie <liujunjie23@huawei.com>
Link: https://lore.kernel.org/r/20211008000433.1115-1-longpeng2@huawei.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20211014053839.727419-10-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
1 parent 37c8041 commit 9906b93

2 files changed

Lines changed: 12 additions & 5 deletions

File tree

drivers/iommu/intel/iommu.c

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2479,12 +2479,17 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
24792479
return -ENOMEM;
24802480
first_pte = pte;
24812481

2482+
lvl_pages = lvl_to_nr_pages(largepage_lvl);
2483+
24822484
/* It is large page*/
24832485
if (largepage_lvl > 1) {
24842486
unsigned long end_pfn;
2487+
unsigned long pages_to_remove;
24852488

24862489
pteval |= DMA_PTE_LARGE_PAGE;
2487-
end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
2490+
pages_to_remove = min_t(unsigned long, nr_pages,
2491+
nr_pte_to_next_page(pte) * lvl_pages);
2492+
end_pfn = iov_pfn + pages_to_remove - 1;
24882493
switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
24892494
} else {
24902495
pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
@@ -2506,10 +2511,6 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
25062511
WARN_ON(1);
25072512
}
25082513

2509-
lvl_pages = lvl_to_nr_pages(largepage_lvl);
2510-
2511-
BUG_ON(nr_pages < lvl_pages);
2512-
25132514
nr_pages -= lvl_pages;
25142515
iov_pfn += lvl_pages;
25152516
phys_pfn += lvl_pages;

include/linux/intel-iommu.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -710,6 +710,12 @@ static inline bool first_pte_in_page(struct dma_pte *pte)
710710
return IS_ALIGNED((unsigned long)pte, VTD_PAGE_SIZE);
711711
}
712712

713+
static inline int nr_pte_to_next_page(struct dma_pte *pte)
714+
{
715+
return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) :
716+
(struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
717+
}
718+
713719
extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
714720
extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
715721

0 commit comments

Comments (0)