@@ -272,6 +272,108 @@ static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
272272 devm_add_action_or_reset (& cxlds -> cxlmd -> dev , free_perf_ents , mds );
273273}
274274
275+ static int match_cxlrd_qos_class (struct device * dev , void * data )
276+ {
277+ int dev_qos_class = * (int * )data ;
278+ struct cxl_root_decoder * cxlrd ;
279+
280+ if (!is_root_decoder (dev ))
281+ return 0 ;
282+
283+ cxlrd = to_cxl_root_decoder (dev );
284+ if (cxlrd -> qos_class == CXL_QOS_CLASS_INVALID )
285+ return 0 ;
286+
287+ if (cxlrd -> qos_class == dev_qos_class )
288+ return 1 ;
289+
290+ return 0 ;
291+ }
292+
293+ static void cxl_qos_match (struct cxl_port * root_port ,
294+ struct list_head * work_list ,
295+ struct list_head * discard_list )
296+ {
297+ struct cxl_dpa_perf * dpa_perf , * n ;
298+
299+ list_for_each_entry_safe (dpa_perf , n , work_list , list ) {
300+ int rc ;
301+
302+ if (dpa_perf -> qos_class == CXL_QOS_CLASS_INVALID )
303+ return ;
304+
305+ rc = device_for_each_child (& root_port -> dev ,
306+ (void * )& dpa_perf -> qos_class ,
307+ match_cxlrd_qos_class );
308+ if (!rc )
309+ list_move_tail (& dpa_perf -> list , discard_list );
310+ }
311+ }
312+
313+ static int match_cxlrd_hb (struct device * dev , void * data )
314+ {
315+ struct device * host_bridge = data ;
316+ struct cxl_switch_decoder * cxlsd ;
317+ struct cxl_root_decoder * cxlrd ;
318+ unsigned int seq ;
319+
320+ if (!is_root_decoder (dev ))
321+ return 0 ;
322+
323+ cxlrd = to_cxl_root_decoder (dev );
324+ cxlsd = & cxlrd -> cxlsd ;
325+
326+ do {
327+ seq = read_seqbegin (& cxlsd -> target_lock );
328+ for (int i = 0 ; i < cxlsd -> nr_targets ; i ++ ) {
329+ if (host_bridge == cxlsd -> target [i ]-> dport_dev )
330+ return 1 ;
331+ }
332+ } while (read_seqretry (& cxlsd -> target_lock , seq ));
333+
334+ return 0 ;
335+ }
336+
337+ static void discard_dpa_perf (struct list_head * list )
338+ {
339+ struct cxl_dpa_perf * dpa_perf , * n ;
340+
341+ list_for_each_entry_safe (dpa_perf , n , list , list ) {
342+ list_del (& dpa_perf -> list );
343+ kfree (dpa_perf );
344+ }
345+ }
346+ DEFINE_FREE (dpa_perf , struct list_head * , if (!list_empty (_T )) discard_dpa_perf (_T ))
347+
/*
 * Validate the memdev's CDAT-derived QoS data against the platform:
 * perf entries whose qos_class matches no root decoder under the CXL
 * root port are moved to a discard list and kfree()d on return (via the
 * dpa_perf cleanup class); if the device's host bridge is not referenced
 * by any root decoder, all ram/pmem perf entries are discarded.
 *
 * Returns -ENODEV when no CXL root port is found; otherwise the result
 * of the host-bridge scan (non-zero on match, 0 when nothing matched).
 * NOTE(review): a successful verify thus returns a positive value, not
 * 0 — presumably callers treat this as best-effort; confirm at call site.
 */
static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
	/*
	 * NOTE(review): __free(put_device) applied to a struct cxl_port *
	 * relies on the embedded struct device being the first member of
	 * struct cxl_port — TODO confirm that layout assumption.
	 */
	struct cxl_port *root_port __free(put_device) = NULL;
	LIST_HEAD(__discard);
	/* Anything moved onto @__discard is freed when @discard goes out of scope */
	struct list_head *discard __free(dpa_perf) = &__discard;
	int rc;

	root_port = find_cxl_root(cxlmd->endpoint);
	if (!root_port)
		return -ENODEV;

	/* Check that the QTG IDs are all sane between end device and root decoders */
	cxl_qos_match(root_port, &mds->ram_perf_list, discard);
	cxl_qos_match(root_port, &mds->pmem_perf_list, discard);

	/* Check to make sure that the device's host bridge is under a root decoder */
	rc = device_for_each_child(&root_port->dev,
				   (void *)cxlmd->endpoint->host_bridge,
				   match_cxlrd_hb);
	if (!rc) {
		/* Host bridge unreachable from any root decoder: drop all perf data */
		list_splice_tail_init(&mds->ram_perf_list, discard);
		list_splice_tail_init(&mds->pmem_perf_list, discard);
	}

	return rc;
}
376+
275377static void discard_dsmas (struct xarray * xa )
276378{
277379 unsigned long index ;
@@ -310,6 +412,7 @@ void cxl_endpoint_parse_cdat(struct cxl_port *port)
310412 }
311413
312414 cxl_memdev_set_qos_class (cxlds , dsmas_xa );
415+ cxl_qos_class_verify (cxlmd );
313416}
314417EXPORT_SYMBOL_NS_GPL (cxl_endpoint_parse_cdat , CXL );
315418
0 commit comments