@@ -806,16 +806,27 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
 {
 #ifdef CONFIG_IRQ_REMAP
 	u32 status, i;
+	u64 entry;
 
 	if (!iommu->ga_log)
 		return -EINVAL;
 
-	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
-
 	/* Check if already running */
-	if (status & (MMIO_STATUS_GALOG_RUN_MASK))
+	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+	if (WARN_ON(status & (MMIO_STATUS_GALOG_RUN_MASK)))
 		return 0;
 
+	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
+	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
+		    &entry, sizeof(entry));
+	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
+		 (BIT_ULL(52)-1)) & ~7ULL;
+	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
+		    &entry, sizeof(entry));
+	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
+
+
 	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
 	iommu_feature_enable(iommu, CONTROL_GALOG_EN);
@@ -825,7 +836,7 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
 			break;
 	}
 
-	if (i >= LOOP_TIMEOUT)
+	if (WARN_ON(i >= LOOP_TIMEOUT))
 		return -EINVAL;
 #endif /* CONFIG_IRQ_REMAP */
 	return 0;
@@ -834,8 +845,6 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
 static int iommu_init_ga_log(struct amd_iommu *iommu)
 {
 #ifdef CONFIG_IRQ_REMAP
-	u64 entry;
-
 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
 		return 0;
 
@@ -849,16 +858,6 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
 	if (!iommu->ga_log_tail)
 		goto err_out;
 
-	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
-	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
-		    &entry, sizeof(entry));
-	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
-		 (BIT_ULL(52)-1)) & ~7ULL;
-	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
-		    &entry, sizeof(entry));
-	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
-	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
-
 	return 0;
 err_out:
 	free_ga_log(iommu);
@@ -1523,7 +1522,7 @@ static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
 }
 
 /*
- * This function clues the initialization function for one IOMMU
+ * This function glues the initialization function for one IOMMU
  * together and also allocates the command buffer and programs the
  * hardware. It does NOT enable the IOMMU. This is done afterwards.
  */
@@ -2016,48 +2015,18 @@ union intcapxt {
 	};
 } __attribute__ ((packed));
 
-/*
- * There isn't really any need to mask/unmask at the irqchip level because
- * the 64-bit INTCAPXT registers can be updated atomically without tearing
- * when the affinity is being updated.
- */
-static void intcapxt_unmask_irq(struct irq_data *data)
-{
-}
-
-static void intcapxt_mask_irq(struct irq_data *data)
-{
-}
-
 
 static struct irq_chip intcapxt_controller;
 
 static int intcapxt_irqdomain_activate(struct irq_domain *domain,
 				       struct irq_data *irqd, bool reserve)
 {
-	struct amd_iommu *iommu = irqd->chip_data;
-	struct irq_cfg *cfg = irqd_cfg(irqd);
-	union intcapxt xt;
-
-	xt.capxt = 0ULL;
-	xt.dest_mode_logical = apic->dest_mode_logical;
-	xt.vector = cfg->vector;
-	xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
-	xt.destid_24_31 = cfg->dest_apicid >> 24;
-
-	/**
-	 * Current IOMMU implemtation uses the same IRQ for all
-	 * 3 IOMMU interrupts.
-	 */
-	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
-	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
-	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
 	return 0;
 }
 
 static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
 					  struct irq_data *irqd)
 {
-	intcapxt_mask_irq(irqd);
 }
 
 
@@ -2091,6 +2060,38 @@ static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq
 	irq_domain_free_irqs_top(domain, virq, nr_irqs);
 }
 
+
+static void intcapxt_unmask_irq(struct irq_data *irqd)
+{
+	struct amd_iommu *iommu = irqd->chip_data;
+	struct irq_cfg *cfg = irqd_cfg(irqd);
+	union intcapxt xt;
+
+	xt.capxt = 0ULL;
+	xt.dest_mode_logical = apic->dest_mode_logical;
+	xt.vector = cfg->vector;
+	xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
+	xt.destid_24_31 = cfg->dest_apicid >> 24;
+
+	/**
+	 * Current IOMMU implementation uses the same IRQ for all
+	 * 3 IOMMU interrupts.
+	 */
+	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
+	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
+	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
+}
+
+static void intcapxt_mask_irq(struct irq_data *irqd)
+{
+	struct amd_iommu *iommu = irqd->chip_data;
+
+	writeq(0, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
+	writeq(0, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
+	writeq(0, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
+}
+
+
 static int intcapxt_set_affinity(struct irq_data *irqd,
 				 const struct cpumask *mask, bool force)
 {
@@ -2100,8 +2101,12 @@ static int intcapxt_set_affinity(struct irq_data *irqd,
 	ret = parent->chip->irq_set_affinity(parent, mask, force);
 	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
 		return ret;
+	return 0;
+}
 
-	return intcapxt_irqdomain_activate(irqd->domain, irqd, false);
+static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on)
+{
+	return on ? -EOPNOTSUPP : 0;
 }
 
 static struct irq_chip intcapxt_controller = {
@@ -2111,7 +2116,8 @@ static struct irq_chip intcapxt_controller = {
 	.irq_ack = irq_chip_ack_parent,
 	.irq_retrigger = irq_chip_retrigger_hierarchy,
 	.irq_set_affinity = intcapxt_set_affinity,
-	.flags = IRQCHIP_SKIP_SET_WAKE,
+	.irq_set_wake = intcapxt_set_wake,
+	.flags = IRQCHIP_MASK_ON_SUSPEND,
 };
 
 static const struct irq_domain_ops intcapxt_domain_ops = {
@@ -2173,7 +2179,6 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu)
 		return ret;
 	}
 
-	iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
 	return 0;
 }
 
@@ -2196,6 +2201,10 @@ static int iommu_init_irq(struct amd_iommu *iommu)
 
 	iommu->int_enabled = true;
 enable_faults:
+
+	if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
+		iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
+
 	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
 
 	if (iommu->ppr_log != NULL)