/* Default guest address width assumed when none is configured. */
#define DEFAULT_DOMAIN_ADDRESS_WIDTH	57

/* Upper bound on the adjusted guest address width (AGAW). */
#define MAX_AGAW_WIDTH		64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

/* Highest DMA pfn / DMA address representable with guest address width @gaw. */
#define __DOMAIN_MAX_PFN(gaw)	((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw)	((((uint64_t)1) << (gaw)) - 1)

/* MM page frame number containing address @addr. */
#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
/* page table handling */
#define LEVEL_STRIDE		(9)	/* pfn bits consumed per page-table level */
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)	/* 0x1ff index mask */

/*
 * Convert an adjusted guest address width (AGAW) value to the number of
 * page-table levels it implies (AGAW 1 -> 3-level, 2 -> 4-level, ...).
 */
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}
74-
75- static inline int agaw_to_width (int agaw )
76- {
77- return min_t (int , 30 + agaw * LEVEL_STRIDE , MAX_AGAW_WIDTH );
78- }
79-
80- static inline int width_to_agaw (int width )
81- {
82- return DIV_ROUND_UP (width - 30 , LEVEL_STRIDE );
83- }
84-
85- static inline unsigned int level_to_offset_bits (int level )
86- {
87- return (level - 1 ) * LEVEL_STRIDE ;
88- }
89-
90- static inline int pfn_level_offset (u64 pfn , int level )
91- {
92- return (pfn >> level_to_offset_bits (level )) & LEVEL_MASK ;
93- }
94-
95- static inline u64 level_mask (int level )
96- {
97- return -1ULL << level_to_offset_bits (level );
98- }
99-
100- static inline u64 level_size (int level )
101- {
102- return 1ULL << level_to_offset_bits (level );
103- }
104-
105- static inline u64 align_to_level (u64 pfn , int level )
106- {
107- return (pfn + level_size (level ) - 1 ) & level_mask (level );
108- }
109-
110- static inline unsigned long lvl_to_nr_pages (unsigned int lvl )
111- {
112- return 1UL << min_t (int , (lvl - 1 ) * LEVEL_STRIDE , MAX_AGAW_PFN_WIDTH );
113- }
114-
115- /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
116- are never going to work. */
117- static inline unsigned long mm_to_dma_pfn_start (unsigned long mm_pfn )
118- {
119- return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT );
120- }
121- static inline unsigned long mm_to_dma_pfn_end (unsigned long mm_pfn )
122- {
123- return ((mm_pfn + 1 ) << (PAGE_SHIFT - VTD_PAGE_SHIFT )) - 1 ;
124- }
/* First VT-d pfn backing struct page @pg. */
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn_start(page_to_pfn(pg));
}
/* First VT-d pfn of the page containing kernel virtual address @p. */
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
133-
13463static void __init check_tylersburg_isoch (void );
13564static int rwbf_quirk ;
13665
@@ -168,78 +97,6 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
16897 return re -> hi & VTD_PAGE_MASK ;
16998}
17099
171- static inline void context_set_present (struct context_entry * context )
172- {
173- context -> lo |= 1 ;
174- }
175-
176- static inline void context_set_fault_enable (struct context_entry * context )
177- {
178- context -> lo &= (((u64 )- 1 ) << 2 ) | 1 ;
179- }
180-
181- static inline void context_set_translation_type (struct context_entry * context ,
182- unsigned long value )
183- {
184- context -> lo &= (((u64 )- 1 ) << 4 ) | 3 ;
185- context -> lo |= (value & 3 ) << 2 ;
186- }
187-
188- static inline void context_set_address_root (struct context_entry * context ,
189- unsigned long value )
190- {
191- context -> lo &= ~VTD_PAGE_MASK ;
192- context -> lo |= value & VTD_PAGE_MASK ;
193- }
194-
195- static inline void context_set_address_width (struct context_entry * context ,
196- unsigned long value )
197- {
198- context -> hi |= value & 7 ;
199- }
200-
201- static inline void context_set_domain_id (struct context_entry * context ,
202- unsigned long value )
203- {
204- context -> hi |= (value & ((1 << 16 ) - 1 )) << 8 ;
205- }
206-
207- static inline void context_set_pasid (struct context_entry * context )
208- {
209- context -> lo |= CONTEXT_PASIDE ;
210- }
211-
212- static inline int context_domain_id (struct context_entry * c )
213- {
214- return ((c -> hi >> 8 ) & 0xffff );
215- }
216-
217- static inline void context_clear_entry (struct context_entry * context )
218- {
219- context -> lo = 0 ;
220- context -> hi = 0 ;
221- }
222-
223- static inline bool context_copied (struct intel_iommu * iommu , u8 bus , u8 devfn )
224- {
225- if (!iommu -> copied_tables )
226- return false;
227-
228- return test_bit (((long )bus << 8 ) | devfn , iommu -> copied_tables );
229- }
230-
231- static inline void
232- set_context_copied (struct intel_iommu * iommu , u8 bus , u8 devfn )
233- {
234- set_bit (((long )bus << 8 ) | devfn , iommu -> copied_tables );
235- }
236-
237- static inline void
238- clear_context_copied (struct intel_iommu * iommu , u8 bus , u8 devfn )
239- {
240- clear_bit (((long )bus << 8 ) | devfn , iommu -> copied_tables );
241- }
242-
243100/*
244101 * This domain is a statically identity mapping domain.
245102 * 1. This domain creats a static 1:1 mapping to all usable memory.
@@ -383,13 +240,12 @@ void free_pgtable_page(void *vaddr)
383240 free_page ((unsigned long )vaddr );
384241}
385242
386- static inline int domain_type_is_si (struct dmar_domain * domain )
243+ static int domain_type_is_si (struct dmar_domain * domain )
387244{
388245 return domain -> domain .type == IOMMU_DOMAIN_IDENTITY ;
389246}
390247
391- static inline int domain_pfn_supported (struct dmar_domain * domain ,
392- unsigned long pfn )
248+ static int domain_pfn_supported (struct dmar_domain * domain , unsigned long pfn )
393249{
394250 int addr_width = agaw_to_width (domain -> agaw ) - VTD_PAGE_SHIFT ;
395251
@@ -451,7 +307,7 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
451307 return __iommu_calculate_agaw (iommu , DEFAULT_DOMAIN_ADDRESS_WIDTH );
452308}
453309
454- static inline bool iommu_paging_structure_coherency (struct intel_iommu * iommu )
310+ static bool iommu_paging_structure_coherency (struct intel_iommu * iommu )
455311{
456312 return sm_supported (iommu ) ?
457313 ecap_smpwc (iommu -> ecap ) : ecap_coherent (iommu -> ecap );
@@ -1574,9 +1430,8 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
15741430}
15751431
15761432/* Notification for newly created mappings */
1577- static inline void __mapping_notify_one (struct intel_iommu * iommu ,
1578- struct dmar_domain * domain ,
1579- unsigned long pfn , unsigned int pages )
1433+ static void __mapping_notify_one (struct intel_iommu * iommu , struct dmar_domain * domain ,
1434+ unsigned long pfn , unsigned int pages )
15801435{
15811436 /*
15821437 * It's a non-present to present mapping. Only flush if caching mode
@@ -1843,7 +1698,7 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
18431698 spin_unlock (& iommu -> lock );
18441699}
18451700
1846- static inline int guestwidth_to_adjustwidth (int gaw )
1701+ static int guestwidth_to_adjustwidth (int gaw )
18471702{
18481703 int agaw ;
18491704 int r = (gaw - 12 ) % 9 ;
@@ -1877,7 +1732,7 @@ static void domain_exit(struct dmar_domain *domain)
18771732 * Value of X in the PDTS field of a scalable mode context entry
18781733 * indicates PASID directory with 2^(X + 7) entries.
18791734 */
1880- static inline unsigned long context_get_sm_pds (struct pasid_table * table )
1735+ static unsigned long context_get_sm_pds (struct pasid_table * table )
18811736{
18821737 unsigned long pds , max_pde ;
18831738
@@ -1889,38 +1744,6 @@ static inline unsigned long context_get_sm_pds(struct pasid_table *table)
18891744 return pds - 7 ;
18901745}
18911746
1892- /*
1893- * Set the RID_PASID field of a scalable mode context entry. The
1894- * IOMMU hardware will use the PASID value set in this field for
1895- * DMA translations of DMA requests without PASID.
1896- */
1897- static inline void
1898- context_set_sm_rid2pasid (struct context_entry * context , unsigned long pasid )
1899- {
1900- context -> hi |= pasid & ((1 << 20 ) - 1 );
1901- }
1902-
1903- /*
1904- * Set the DTE(Device-TLB Enable) field of a scalable mode context
1905- * entry.
1906- */
1907- static inline void context_set_sm_dte (struct context_entry * context )
1908- {
1909- context -> lo |= BIT_ULL (2 );
1910- }
1911-
1912- /*
1913- * Set the PRE(Page Request Enable) field of a scalable mode context
1914- * entry.
1915- */
1916- static inline void context_set_sm_pre (struct context_entry * context )
1917- {
1918- context -> lo |= BIT_ULL (4 );
1919- }
1920-
/* Convert value to context PASID directory size field coding (bits 11:9). */
#define context_pdts(pds)	(((pds) & 0x7) << 9)
1923-
19241747static int domain_context_mapping_one (struct dmar_domain * domain ,
19251748 struct intel_iommu * iommu ,
19261749 struct pasid_table * table ,
@@ -2102,18 +1925,15 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
21021925}
21031926
21041927/* Returns a number of VTD pages, but aligned to MM page size */
2105- static inline unsigned long aligned_nrpages (unsigned long host_addr ,
2106- size_t size )
1928+ static unsigned long aligned_nrpages (unsigned long host_addr , size_t size )
21071929{
21081930 host_addr &= ~PAGE_MASK ;
21091931 return PAGE_ALIGN (host_addr + size ) >> VTD_PAGE_SHIFT ;
21101932}
21111933
21121934/* Return largest possible superpage level for a given mapping */
2113- static inline int hardware_largepage_caps (struct dmar_domain * domain ,
2114- unsigned long iov_pfn ,
2115- unsigned long phy_pfn ,
2116- unsigned long pages )
1935+ static int hardware_largepage_caps (struct dmar_domain * domain , unsigned long iov_pfn ,
1936+ unsigned long phy_pfn , unsigned long pages )
21171937{
21181938 int support , level = 1 ;
21191939 unsigned long pfnmerge ;
@@ -3604,7 +3424,7 @@ void intel_iommu_shutdown(void)
36043424 up_write (& dmar_global_lock );
36053425}
36063426
3607- static inline struct intel_iommu * dev_to_intel_iommu (struct device * dev )
3427+ static struct intel_iommu * dev_to_intel_iommu (struct device * dev )
36083428{
36093429 struct iommu_device * iommu_dev = dev_to_iommu_device (dev );
36103430
@@ -3683,7 +3503,7 @@ const struct attribute_group *intel_iommu_groups[] = {
36833503 NULL ,
36843504};
36853505
3686- static inline bool has_external_pci (void )
3506+ static bool has_external_pci (void )
36873507{
36883508 struct pci_dev * pdev = NULL ;
36893509
0 commit comments