@@ -39,7 +39,7 @@ static void iopf_put_dev_fault_param(struct iommu_fault_param *fault_param)
 	kfree_rcu(fault_param, rcu);
 }
 
-void iopf_free_group(struct iopf_group *group)
+static void __iopf_free_group(struct iopf_group *group)
 {
 	struct iopf_fault *iopf, *next;
 
@@ -50,6 +50,11 @@ void iopf_free_group(struct iopf_group *group)
 
 	/* Pair with iommu_report_device_fault(). */
 	iopf_put_dev_fault_param(group->fault_param);
+}
+
+void iopf_free_group(struct iopf_group *group)
+{
+	__iopf_free_group(group);
 	kfree(group);
 }
 EXPORT_SYMBOL_GPL(iopf_free_group);
@@ -97,14 +102,49 @@ static int report_partial_fault(struct iommu_fault_param *fault_param,
 	return 0;
 }
 
+static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
+					   struct iopf_fault *evt,
+					   struct iopf_group *abort_group)
+{
+	struct iopf_fault *iopf, *next;
+	struct iopf_group *group;
+
+	group = kzalloc(sizeof(*group), GFP_KERNEL);
+	if (!group) {
+		/*
+		 * We always need to construct the group as we need it to abort
+		 * the request at the driver if it can't be handled.
+		 */
+		group = abort_group;
+	}
+
+	group->fault_param = iopf_param;
+	group->last_fault.fault = evt->fault;
+	INIT_LIST_HEAD(&group->faults);
+	INIT_LIST_HEAD(&group->pending_node);
+	list_add(&group->last_fault.list, &group->faults);
+
+	/* See if we have partial faults for this group */
+	mutex_lock(&iopf_param->lock);
+	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
+		if (iopf->fault.prm.grpid == evt->fault.prm.grpid)
+			/* Insert *before* the last fault */
+			list_move(&iopf->list, &group->faults);
+	}
+	list_add(&group->pending_node, &iopf_param->faults);
+	mutex_unlock(&iopf_param->lock);
+
+	return group;
+}
+
 /**
  * iommu_report_device_fault() - Report fault event to device driver
  * @dev: the device
  * @evt: fault event data
  *
  * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
- * handler. When this function fails and the fault is recoverable, it is the
- * caller's responsibility to complete the fault.
+ * handler. If this function fails then ops->page_response() was called to
+ * complete evt if required.
 *
 * This module doesn't handle PCI PASID Stop Marker; IOMMU drivers must discard
 * them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't
@@ -143,22 +183,18 @@ int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
 {
 	struct iommu_fault *fault = &evt->fault;
 	struct iommu_fault_param *iopf_param;
-	struct iopf_fault *iopf, *next;
-	struct iommu_domain *domain;
+	struct iopf_group abort_group = {};
 	struct iopf_group *group;
 	int ret;
 
-	if (fault->type != IOMMU_FAULT_PAGE_REQ)
-		return -EOPNOTSUPP;
-
 	iopf_param = iopf_get_dev_fault_param(dev);
-	if (!iopf_param)
+	if (WARN_ON(!iopf_param))
 		return -ENODEV;
 
 	if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
 		ret = report_partial_fault(iopf_param, fault);
 		iopf_put_dev_fault_param(iopf_param);
-
+		/* A request that is not the last does not need to be ack'd */
 		return ret;
 	}
 
@@ -170,56 +206,33 @@ int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
 	 * will send a response to the hardware. We need to clean up before
 	 * leaving, otherwise partial faults will be stuck.
 	 */
-	domain = get_domain_for_iopf(dev, fault);
-	if (!domain) {
-		ret = -EINVAL;
-		goto cleanup_partial;
-	}
-
-	group = kzalloc(sizeof(*group), GFP_KERNEL);
-	if (!group) {
+	group = iopf_group_alloc(iopf_param, evt, &abort_group);
+	if (group == &abort_group) {
 		ret = -ENOMEM;
-		goto cleanup_partial;
+		goto err_abort;
 	}
 
-	group->fault_param = iopf_param;
-	group->last_fault.fault = *fault;
-	INIT_LIST_HEAD(&group->faults);
-	INIT_LIST_HEAD(&group->pending_node);
-	group->domain = domain;
-	list_add(&group->last_fault.list, &group->faults);
-
-	/* See if we have partial faults for this group */
-	mutex_lock(&iopf_param->lock);
-	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
-		if (iopf->fault.prm.grpid == fault->prm.grpid)
-			/* Insert *before* the last fault */
-			list_move(&iopf->list, &group->faults);
-	}
-	list_add(&group->pending_node, &iopf_param->faults);
-	mutex_unlock(&iopf_param->lock);
-
-	ret = domain->iopf_handler(group);
-	if (ret) {
-		mutex_lock(&iopf_param->lock);
-		list_del_init(&group->pending_node);
-		mutex_unlock(&iopf_param->lock);
-		iopf_free_group(group);
+	group->domain = get_domain_for_iopf(dev, fault);
+	if (!group->domain) {
+		ret = -EINVAL;
+		goto err_abort;
 	}
 
-	return ret;
-
-cleanup_partial:
-	mutex_lock(&iopf_param->lock);
-	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
-		if (iopf->fault.prm.grpid == fault->prm.grpid) {
-			list_del(&iopf->list);
-			kfree(iopf);
-		}
-	}
-	mutex_unlock(&iopf_param->lock);
-	iopf_put_dev_fault_param(iopf_param);
+	/*
+	 * On success iopf_handler must call iopf_group_response() and
+	 * iopf_free_group()
+	 */
+	ret = group->domain->iopf_handler(group);
+	if (ret)
+		goto err_abort;
+	return 0;
 
+err_abort:
+	iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE);
+	if (group == &abort_group)
+		__iopf_free_group(group);
+	else
+		iopf_free_group(group);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_report_device_fault);
@@ -259,11 +272,9 @@ EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
  * iopf_group_response - Respond a group of page faults
  * @group: the group of faults with the same group id
  * @status: the response code
- *
- * Return 0 on success and <0 on error.
  */
-int iopf_group_response(struct iopf_group *group,
-			enum iommu_page_response_code status)
+void iopf_group_response(struct iopf_group *group,
+			 enum iommu_page_response_code status)
 {
 	struct iommu_fault_param *fault_param = group->fault_param;
 	struct iopf_fault *iopf = &group->last_fault;
@@ -274,17 +285,14 @@ int iopf_group_response(struct iopf_group *group,
 		.grpid = iopf->fault.prm.grpid,
 		.code = status,
 	};
-	int ret = -EINVAL;
 
 	/* Only send response if there is a fault report pending */
 	mutex_lock(&fault_param->lock);
 	if (!list_empty(&group->pending_node)) {
-		ret = ops->page_response(dev, &group->last_fault, &resp);
+		ops->page_response(dev, &group->last_fault, &resp);
 		list_del_init(&group->pending_node);
 	}
 	mutex_unlock(&fault_param->lock);
-
-	return ret;
 }
 EXPORT_SYMBOL_GPL(iopf_group_response);
 