@@ -845,6 +845,7 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
845845 (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \
846846 MMIO_STATUS_EVT_INT_MASK | \
847847 MMIO_STATUS_PPR_INT_MASK | \
848+ MMIO_STATUS_GALOG_OVERFLOW_MASK | \
848849 MMIO_STATUS_GALOG_INT_MASK)
849850
850851irqreturn_t amd_iommu_int_thread (int irq , void * data )
@@ -868,10 +869,16 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
868869 }
869870
870871#ifdef CONFIG_IRQ_REMAP
871- if (status & MMIO_STATUS_GALOG_INT_MASK ) {
872+ if (status & (MMIO_STATUS_GALOG_INT_MASK |
873+ MMIO_STATUS_GALOG_OVERFLOW_MASK )) {
872874 pr_devel ("Processing IOMMU GA Log\n" );
873875 iommu_poll_ga_log (iommu );
874876 }
877+
878+ if (status & MMIO_STATUS_GALOG_OVERFLOW_MASK ) {
879+ pr_info_ratelimited ("IOMMU GA Log overflow\n" );
880+ amd_iommu_restart_ga_log (iommu );
881+ }
875882#endif
876883
877884 if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK ) {
@@ -2067,7 +2074,7 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
20672074{
20682075 struct io_pgtable_ops * pgtbl_ops ;
20692076 struct protection_domain * domain ;
2070- int pgtable = amd_iommu_pgtable ;
2077+ int pgtable ;
20712078 int mode = DEFAULT_PGTABLE_LEVEL ;
20722079 int ret ;
20732080
@@ -2084,6 +2091,10 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
20842091 mode = PAGE_MODE_NONE ;
20852092 } else if (type == IOMMU_DOMAIN_UNMANAGED ) {
20862093 pgtable = AMD_IOMMU_V1 ;
2094+ } else if (type == IOMMU_DOMAIN_DMA || type == IOMMU_DOMAIN_DMA_FQ ) {
2095+ pgtable = amd_iommu_pgtable ;
2096+ } else {
2097+ return NULL ;
20872098 }
20882099
20892100 switch (pgtable ) {
@@ -2118,6 +2129,15 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
21182129 return NULL ;
21192130}
21202131
2132+ static inline u64 dma_max_address (void ) /* inclusive end of the DMA aperture for the active IOMMU page-table mode */
2133+ {
2134+ if (amd_iommu_pgtable == AMD_IOMMU_V1 )
2135+ return ~0ULL ; /* V1 page table: full 64-bit IOVA space */
2136+
2137+ /* V2 with 4/5 level page table: highest IOVA covered is (1 << PM_LEVEL_SHIFT(level)) - 1 */
2138+ return ((1ULL << PM_LEVEL_SHIFT (amd_iommu_gpt_level )) - 1 );
2139+ }
2140+
21212141static struct iommu_domain * amd_iommu_domain_alloc (unsigned type )
21222142{
21232143 struct protection_domain * domain ;
@@ -2134,7 +2154,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
21342154 return NULL ;
21352155
21362156 domain -> domain .geometry .aperture_start = 0 ;
2137- domain -> domain .geometry .aperture_end = ~ 0ULL ;
2157+ domain -> domain .geometry .aperture_end = dma_max_address () ;
21382158 domain -> domain .geometry .force_aperture = true;
21392159
21402160 return & domain -> domain ;
@@ -2387,7 +2407,7 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
23872407 unsigned long flags ;
23882408
23892409 spin_lock_irqsave (& dom -> lock , flags );
2390- domain_flush_pages (dom , gather -> start , gather -> end - gather -> start , 1 );
2410+ domain_flush_pages (dom , gather -> start , gather -> end - gather -> start + 1 , 1 );
23912411 amd_iommu_domain_flush_complete (dom );
23922412 spin_unlock_irqrestore (& dom -> lock , flags );
23932413}
@@ -3493,8 +3513,7 @@ int amd_iommu_activate_guest_mode(void *data)
34933513 struct irte_ga * entry = (struct irte_ga * ) ir_data -> entry ;
34943514 u64 valid ;
34953515
3496- if (!AMD_IOMMU_GUEST_IR_VAPIC (amd_iommu_guest_ir ) ||
3497- !entry || entry -> lo .fields_vapic .guest_mode )
3516+ if (!AMD_IOMMU_GUEST_IR_VAPIC (amd_iommu_guest_ir ) || !entry )
34983517 return 0 ;
34993518
35003519 valid = entry -> lo .fields_vapic .valid ;
0 commit comments