@@ -1303,35 +1303,30 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
13031303 know the hardware page-walk will no longer touch them.
13041304 The 'pte' argument is the *parent* PTE, pointing to the page that is to
13051305 be freed. */
1306- static struct page * dma_pte_list_pagetables (struct dmar_domain * domain ,
1307- int level , struct dma_pte * pte ,
1308- struct page * freelist )
1306+ static void dma_pte_list_pagetables (struct dmar_domain * domain ,
1307+ int level , struct dma_pte * pte ,
1308+ struct list_head * freelist )
13091309{
13101310 struct page * pg ;
13111311
13121312 pg = pfn_to_page (dma_pte_addr (pte ) >> PAGE_SHIFT );
1313- pg -> freelist = freelist ;
1314- freelist = pg ;
1313+ list_add_tail (& pg -> lru , freelist );
13151314
13161315 if (level == 1 )
1317- return freelist ;
1316+ return ;
13181317
13191318 pte = page_address (pg );
13201319 do {
13211320 if (dma_pte_present (pte ) && !dma_pte_superpage (pte ))
1322- freelist = dma_pte_list_pagetables (domain , level - 1 ,
1323- pte , freelist );
1321+ dma_pte_list_pagetables (domain , level - 1 , pte , freelist );
13241322 pte ++ ;
13251323 } while (!first_pte_in_page (pte ));
1326-
1327- return freelist ;
13281324}
13291325
1330- static struct page * dma_pte_clear_level (struct dmar_domain * domain , int level ,
1331- struct dma_pte * pte , unsigned long pfn ,
1332- unsigned long start_pfn ,
1333- unsigned long last_pfn ,
1334- struct page * freelist )
1326+ static void dma_pte_clear_level (struct dmar_domain * domain , int level ,
1327+ struct dma_pte * pte , unsigned long pfn ,
1328+ unsigned long start_pfn , unsigned long last_pfn ,
1329+ struct list_head * freelist )
13351330{
13361331 struct dma_pte * first_pte = NULL , * last_pte = NULL ;
13371332
@@ -1350,18 +1345,18 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
13501345 /* These subordinate page tables are going away entirely. Don't
13511346 bother to clear them; we're just going to *free* them. */
13521347 if (level > 1 && !dma_pte_superpage (pte ))
1353- freelist = dma_pte_list_pagetables (domain , level - 1 , pte , freelist );
1348+ dma_pte_list_pagetables (domain , level - 1 , pte , freelist );
13541349
13551350 dma_clear_pte (pte );
13561351 if (!first_pte )
13571352 first_pte = pte ;
13581353 last_pte = pte ;
13591354 } else if (level > 1 ) {
13601355 /* Recurse down into a level that isn't *entirely* obsolete */
1361- freelist = dma_pte_clear_level (domain , level - 1 ,
1362- phys_to_virt (dma_pte_addr (pte )),
1363- level_pfn , start_pfn , last_pfn ,
1364- freelist );
1356+ dma_pte_clear_level (domain , level - 1 ,
1357+ phys_to_virt (dma_pte_addr (pte )),
1358+ level_pfn , start_pfn , last_pfn ,
1359+ freelist );
13651360 }
13661361next :
13671362 pfn = level_pfn + level_size (level );
@@ -1370,47 +1365,28 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
13701365 if (first_pte )
13711366 domain_flush_cache (domain , first_pte ,
13721367 (void * )++ last_pte - (void * )first_pte );
1373-
1374- return freelist ;
13751368}
13761369
13771370/* We can't just free the pages because the IOMMU may still be walking
13781371 the page tables, and may have cached the intermediate levels. The
13791372 pages can only be freed after the IOTLB flush has been done. */
1380- static struct page * domain_unmap (struct dmar_domain * domain ,
1381- unsigned long start_pfn ,
1382- unsigned long last_pfn ,
1383- struct page * freelist )
1373+ static void domain_unmap (struct dmar_domain * domain , unsigned long start_pfn ,
1374+ unsigned long last_pfn , struct list_head * freelist )
13841375{
13851376 BUG_ON (!domain_pfn_supported (domain , start_pfn ));
13861377 BUG_ON (!domain_pfn_supported (domain , last_pfn ));
13871378 BUG_ON (start_pfn > last_pfn );
13881379
13891380 /* we don't need lock here; nobody else touches the iova range */
1390- freelist = dma_pte_clear_level (domain , agaw_to_level (domain -> agaw ),
1391- domain -> pgd , 0 , start_pfn , last_pfn ,
1392- freelist );
1381+ dma_pte_clear_level (domain , agaw_to_level (domain -> agaw ),
1382+ domain -> pgd , 0 , start_pfn , last_pfn , freelist );
13931383
13941384 /* free pgd */
13951385 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN (domain -> gaw )) {
13961386 struct page * pgd_page = virt_to_page (domain -> pgd );
1397- pgd_page -> freelist = freelist ;
1398- freelist = pgd_page ;
1399-
1387+ list_add_tail (& pgd_page -> lru , freelist );
14001388 domain -> pgd = NULL ;
14011389 }
1402-
1403- return freelist ;
1404- }
1405-
1406- static void dma_free_pagelist (struct page * freelist )
1407- {
1408- struct page * pg ;
1409-
1410- while ((pg = freelist )) {
1411- freelist = pg -> freelist ;
1412- free_pgtable_page (page_address (pg ));
1413- }
14141390}
14151391
14161392/* iommu handling */
@@ -2095,11 +2071,10 @@ static void domain_exit(struct dmar_domain *domain)
20952071 domain_remove_dev_info (domain );
20962072
20972073 if (domain -> pgd ) {
2098- struct page * freelist ;
2074+ LIST_HEAD ( freelist ) ;
20992075
2100- freelist = domain_unmap (domain , 0 ,
2101- DOMAIN_MAX_PFN (domain -> gaw ), NULL );
2102- dma_free_pagelist (freelist );
2076+ domain_unmap (domain , 0 , DOMAIN_MAX_PFN (domain -> gaw ), & freelist );
2077+ put_pages_list (& freelist );
21032078 }
21042079
21052080 free_domain_mem (domain );
@@ -4192,19 +4167,17 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
41924167 {
41934168 struct dmar_drhd_unit * drhd ;
41944169 struct intel_iommu * iommu ;
4195- struct page * freelist ;
4170+ LIST_HEAD ( freelist ) ;
41964171
4197- freelist = domain_unmap (si_domain ,
4198- start_vpfn , last_vpfn ,
4199- NULL );
4172+ domain_unmap (si_domain , start_vpfn , last_vpfn , & freelist );
42004173
42014174 rcu_read_lock ();
42024175 for_each_active_iommu (iommu , drhd )
42034176 iommu_flush_iotlb_psi (iommu , si_domain ,
42044177 start_vpfn , mhp -> nr_pages ,
4205- ! freelist , 0 );
4178+ list_empty ( & freelist ) , 0 );
42064179 rcu_read_unlock ();
4207- dma_free_pagelist ( freelist );
4180+ put_pages_list ( & freelist );
42084181 }
42094182 break ;
42104183 }
@@ -5211,8 +5184,7 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
52115184 start_pfn = iova >> VTD_PAGE_SHIFT ;
52125185 last_pfn = (iova + size - 1 ) >> VTD_PAGE_SHIFT ;
52135186
5214- gather -> freelist = domain_unmap (dmar_domain , start_pfn ,
5215- last_pfn , gather -> freelist );
5187+ domain_unmap (dmar_domain , start_pfn , last_pfn , & gather -> freelist );
52165188
52175189 if (dmar_domain -> max_addr == iova + size )
52185190 dmar_domain -> max_addr = iova ;
@@ -5248,9 +5220,10 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
52485220
52495221 for_each_domain_iommu (iommu_id , dmar_domain )
52505222 iommu_flush_iotlb_psi (g_iommus [iommu_id ], dmar_domain ,
5251- start_pfn , nrpages , !gather -> freelist , 0 );
5223+ start_pfn , nrpages ,
5224+ list_empty (& gather -> freelist ), 0 );
52525225
5253- dma_free_pagelist ( gather -> freelist );
5226+ put_pages_list ( & gather -> freelist );
52545227}
52555228
52565229static phys_addr_t intel_iommu_iova_to_phys (struct iommu_domain * domain ,
0 commit comments