2020 * The third instance usage is through standard arm-smmu driver itself and
2121 * is out of scope of this implementation.
2222 */
/* Maximum number of mirrored SMMU instances this implementation drives. */
#define MAX_SMMU_INSTANCES 2

/* NVIDIA-specific wrapper around the generic arm-smmu device. */
struct nvidia_smmu {
	/* Must be first so to_nvidia_smmu()/container_of() works. */
	struct arm_smmu_device smmu;
	/* MMIO base of each instance; bases[0] is ioremapped by arm-smmu.c. */
	void __iomem *bases[MAX_SMMU_INSTANCES];
	/* Number of instances actually discovered (1..MAX_SMMU_INSTANCES). */
	unsigned int num_instances;
};
2930
31+ static inline struct nvidia_smmu * to_nvidia_smmu (struct arm_smmu_device * smmu )
32+ {
33+ return container_of (smmu , struct nvidia_smmu , smmu );
34+ }
35+
3036static inline void __iomem * nvidia_smmu_page (struct arm_smmu_device * smmu ,
3137 unsigned int inst , int page )
3238{
@@ -47,9 +53,10 @@ static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu,
4753static void nvidia_smmu_write_reg (struct arm_smmu_device * smmu ,
4854 int page , int offset , u32 val )
4955{
56+ struct nvidia_smmu * nvidia = to_nvidia_smmu (smmu );
5057 unsigned int i ;
5158
52- for (i = 0 ; i < NUM_SMMU_INSTANCES ; i ++ ) {
59+ for (i = 0 ; i < nvidia -> num_instances ; i ++ ) {
5360 void __iomem * reg = nvidia_smmu_page (smmu , i , page ) + offset ;
5461
5562 writel_relaxed (val , reg );
@@ -67,9 +74,10 @@ static u64 nvidia_smmu_read_reg64(struct arm_smmu_device *smmu,
6774static void nvidia_smmu_write_reg64 (struct arm_smmu_device * smmu ,
6875 int page , int offset , u64 val )
6976{
77+ struct nvidia_smmu * nvidia = to_nvidia_smmu (smmu );
7078 unsigned int i ;
7179
72- for (i = 0 ; i < NUM_SMMU_INSTANCES ; i ++ ) {
80+ for (i = 0 ; i < nvidia -> num_instances ; i ++ ) {
7381 void __iomem * reg = nvidia_smmu_page (smmu , i , page ) + offset ;
7482
7583 writeq_relaxed (val , reg );
@@ -79,6 +87,7 @@ static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
7987static void nvidia_smmu_tlb_sync (struct arm_smmu_device * smmu , int page ,
8088 int sync , int status )
8189{
90+ struct nvidia_smmu * nvidia = to_nvidia_smmu (smmu );
8291 unsigned int delay ;
8392
8493 arm_smmu_writel (smmu , page , sync , 0 );
@@ -90,7 +99,7 @@ static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
9099 u32 val = 0 ;
91100 unsigned int i ;
92101
93- for (i = 0 ; i < NUM_SMMU_INSTANCES ; i ++ ) {
102+ for (i = 0 ; i < nvidia -> num_instances ; i ++ ) {
94103 void __iomem * reg ;
95104
96105 reg = nvidia_smmu_page (smmu , i , page ) + status ;
@@ -112,9 +121,10 @@ static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
112121
113122static int nvidia_smmu_reset (struct arm_smmu_device * smmu )
114123{
124+ struct nvidia_smmu * nvidia = to_nvidia_smmu (smmu );
115125 unsigned int i ;
116126
117- for (i = 0 ; i < NUM_SMMU_INSTANCES ; i ++ ) {
127+ for (i = 0 ; i < nvidia -> num_instances ; i ++ ) {
118128 u32 val ;
119129 void __iomem * reg = nvidia_smmu_page (smmu , i , ARM_SMMU_GR0 ) +
120130 ARM_SMMU_GR0_sGFSR ;
@@ -157,8 +167,9 @@ static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev)
157167 unsigned int inst ;
158168 irqreturn_t ret = IRQ_NONE ;
159169 struct arm_smmu_device * smmu = dev ;
170+ struct nvidia_smmu * nvidia = to_nvidia_smmu (smmu );
160171
161- for (inst = 0 ; inst < NUM_SMMU_INSTANCES ; inst ++ ) {
172+ for (inst = 0 ; inst < nvidia -> num_instances ; inst ++ ) {
162173 irqreturn_t irq_ret ;
163174
164175 irq_ret = nvidia_smmu_global_fault_inst (irq , smmu , inst );
@@ -202,11 +213,13 @@ static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
202213 struct arm_smmu_device * smmu ;
203214 struct iommu_domain * domain = dev ;
204215 struct arm_smmu_domain * smmu_domain ;
216+ struct nvidia_smmu * nvidia ;
205217
206218 smmu_domain = container_of (domain , struct arm_smmu_domain , domain );
207219 smmu = smmu_domain -> smmu ;
220+ nvidia = to_nvidia_smmu (smmu );
208221
209- for (inst = 0 ; inst < NUM_SMMU_INSTANCES ; inst ++ ) {
222+ for (inst = 0 ; inst < nvidia -> num_instances ; inst ++ ) {
210223 irqreturn_t irq_ret ;
211224
212225 /*
@@ -235,29 +248,41 @@ static const struct arm_smmu_impl nvidia_smmu_impl = {
235248 .context_fault = nvidia_smmu_context_fault ,
236249};
237250
/*
 * When only a single SMMU instance is present there is nothing to mirror,
 * so no hooks are overridden: the default arm-smmu register accessors,
 * TLB sync, reset and fault handlers are used as-is.
 */
static const struct arm_smmu_impl nvidia_smmu_single_impl = {
};
253+
238254struct arm_smmu_device * nvidia_smmu_impl_init (struct arm_smmu_device * smmu )
239255{
240256 struct resource * res ;
241257 struct device * dev = smmu -> dev ;
242258 struct nvidia_smmu * nvidia_smmu ;
243259 struct platform_device * pdev = to_platform_device (dev );
260+ unsigned int i ;
244261
245262 nvidia_smmu = devm_krealloc (dev , smmu , sizeof (* nvidia_smmu ), GFP_KERNEL );
246263 if (!nvidia_smmu )
247264 return ERR_PTR (- ENOMEM );
248265
249266 /* Instance 0 is ioremapped by arm-smmu.c. */
250267 nvidia_smmu -> bases [0 ] = smmu -> base ;
268+ nvidia_smmu -> num_instances ++ ;
251269
252- res = platform_get_resource (pdev , IORESOURCE_MEM , 1 );
253- if (!res )
254- return ERR_PTR (- ENODEV );
270+ for (i = 1 ; i < MAX_SMMU_INSTANCES ; i ++ ) {
271+ res = platform_get_resource (pdev , IORESOURCE_MEM , i );
272+ if (!res )
273+ break ;
255274
256- nvidia_smmu -> bases [1 ] = devm_ioremap_resource (dev , res );
257- if (IS_ERR (nvidia_smmu -> bases [1 ]))
258- return ERR_CAST (nvidia_smmu -> bases [1 ]);
275+ nvidia_smmu -> bases [i ] = devm_ioremap_resource (dev , res );
276+ if (IS_ERR (nvidia_smmu -> bases [i ]))
277+ return ERR_CAST (nvidia_smmu -> bases [i ]);
278+
279+ nvidia_smmu -> num_instances ++ ;
280+ }
259281
260- nvidia_smmu -> smmu .impl = & nvidia_smmu_impl ;
282+ if (nvidia_smmu -> num_instances == 1 )
283+ nvidia_smmu -> smmu .impl = & nvidia_smmu_single_impl ;
284+ else
285+ nvidia_smmu -> smmu .impl = & nvidia_smmu_impl ;
261286
262287 return & nvidia_smmu -> smmu ;
263288}
0 commit comments