@@ -274,29 +274,13 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
274274 return tisci_rm -> tisci_udmap_ops -> tx_ch_cfg (tisci_rm -> tisci , & req );
275275}
276276
277- struct k3_udma_glue_tx_channel * k3_udma_glue_request_tx_chn (struct device * dev ,
278- const char * name , struct k3_udma_glue_tx_channel_cfg * cfg )
277+ static int
278+ k3_udma_glue_request_tx_chn_common (struct device * dev ,
279+ struct k3_udma_glue_tx_channel * tx_chn ,
280+ struct k3_udma_glue_tx_channel_cfg * cfg )
279281{
280- struct k3_udma_glue_tx_channel * tx_chn ;
281282 int ret ;
282283
283- tx_chn = devm_kzalloc (dev , sizeof (* tx_chn ), GFP_KERNEL );
284- if (!tx_chn )
285- return ERR_PTR (- ENOMEM );
286-
287- tx_chn -> common .dev = dev ;
288- tx_chn -> common .swdata_size = cfg -> swdata_size ;
289- tx_chn -> tx_pause_on_err = cfg -> tx_pause_on_err ;
290- tx_chn -> tx_filt_einfo = cfg -> tx_filt_einfo ;
291- tx_chn -> tx_filt_pswords = cfg -> tx_filt_pswords ;
292- tx_chn -> tx_supr_tdpkt = cfg -> tx_supr_tdpkt ;
293-
294- /* parse of udmap channel */
295- ret = of_k3_udma_glue_parse_chn (dev -> of_node , name ,
296- & tx_chn -> common , true);
297- if (ret )
298- goto err ;
299-
300284 tx_chn -> common .hdesc_size = cppi5_hdesc_calc_size (tx_chn -> common .epib ,
301285 tx_chn -> common .psdata_size ,
302286 tx_chn -> common .swdata_size );
@@ -312,7 +296,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
312296 if (IS_ERR (tx_chn -> udma_tchanx )) {
313297 ret = PTR_ERR (tx_chn -> udma_tchanx );
314298 dev_err (dev , "UDMAX tchanx get err %d\n" , ret );
315- goto err ;
299+ return ret ;
316300 }
317301 tx_chn -> udma_tchan_id = xudma_tchan_get_id (tx_chn -> udma_tchanx );
318302
@@ -325,7 +309,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
325309 dev_err (dev , "Channel Device registration failed %d\n" , ret );
326310 put_device (& tx_chn -> common .chan_dev );
327311 tx_chn -> common .chan_dev .parent = NULL ;
328- goto err ;
312+ return ret ;
329313 }
330314
331315 if (xudma_is_pktdma (tx_chn -> common .udmax )) {
@@ -349,7 +333,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
349333 & tx_chn -> ringtxcq );
350334 if (ret ) {
351335 dev_err (dev , "Failed to get TX/TXCQ rings %d\n" , ret );
352- goto err ;
336+ return ret ;
353337 }
354338
355339 /* Set the dma_dev for the rings to be configured */
@@ -365,13 +349,13 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
365349 ret = k3_ringacc_ring_cfg (tx_chn -> ringtx , & cfg -> tx_cfg );
366350 if (ret ) {
367351 dev_err (dev , "Failed to cfg ringtx %d\n" , ret );
368- goto err ;
352+ return ret ;
369353 }
370354
371355 ret = k3_ringacc_ring_cfg (tx_chn -> ringtxcq , & cfg -> txcq_cfg );
372356 if (ret ) {
373357 dev_err (dev , "Failed to cfg ringtx %d\n" , ret );
374- goto err ;
358+ return ret ;
375359 }
376360
377361 /* request and cfg psi-l */
@@ -382,11 +366,42 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
382366 ret = k3_udma_glue_cfg_tx_chn (tx_chn );
383367 if (ret ) {
384368 dev_err (dev , "Failed to cfg tchan %d\n" , ret );
385- goto err ;
369+ return ret ;
386370 }
387371
388372 k3_udma_glue_dump_tx_chn (tx_chn );
389373
374+ return 0 ;
375+ }
376+
377+ struct k3_udma_glue_tx_channel *
378+ k3_udma_glue_request_tx_chn (struct device * dev , const char * name ,
379+ struct k3_udma_glue_tx_channel_cfg * cfg )
380+ {
381+ struct k3_udma_glue_tx_channel * tx_chn ;
382+ int ret ;
383+
384+ tx_chn = devm_kzalloc (dev , sizeof (* tx_chn ), GFP_KERNEL );
385+ if (!tx_chn )
386+ return ERR_PTR (- ENOMEM );
387+
388+ tx_chn -> common .dev = dev ;
389+ tx_chn -> common .swdata_size = cfg -> swdata_size ;
390+ tx_chn -> tx_pause_on_err = cfg -> tx_pause_on_err ;
391+ tx_chn -> tx_filt_einfo = cfg -> tx_filt_einfo ;
392+ tx_chn -> tx_filt_pswords = cfg -> tx_filt_pswords ;
393+ tx_chn -> tx_supr_tdpkt = cfg -> tx_supr_tdpkt ;
394+
395+ /* parse of udmap channel */
396+ ret = of_k3_udma_glue_parse_chn (dev -> of_node , name ,
397+ & tx_chn -> common , true);
398+ if (ret )
399+ goto err ;
400+
401+ ret = k3_udma_glue_request_tx_chn_common (dev , tx_chn , cfg );
402+ if (ret )
403+ goto err ;
404+
390405 return tx_chn ;
391406
392407err :
@@ -395,6 +410,41 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
395410}
396411EXPORT_SYMBOL_GPL (k3_udma_glue_request_tx_chn );
397412
/**
 * k3_udma_glue_request_tx_chn_for_thread_id - request a TX channel by PSI-L thread ID
 * @dev: device requesting the channel (used for devm allocation and logging)
 * @cfg: TX channel configuration (swdata size, pause/filter/teardown flags,
 *	 ring configurations consumed by the common setup path)
 * @udmax_np: device node of the DMA instance to take the channel from
 * @thread_id: PSI-L thread ID identifying the TX channel on that instance
 *
 * Variant of k3_udma_glue_request_tx_chn() for callers that identify the
 * channel by an explicit DMA instance node and PSI-L thread ID rather than
 * by a named "dmas" entry in @dev's own DT node. Both paths converge on
 * k3_udma_glue_request_tx_chn_common() for resource setup.
 *
 * Return: configured TX channel handle on success, ERR_PTR() on failure.
 */
struct k3_udma_glue_tx_channel *
k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev,
					   struct k3_udma_glue_tx_channel_cfg *cfg,
					   struct device_node *udmax_np, u32 thread_id)
{
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	/* devm-managed: freed automatically when @dev is unbound */
	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	/* Copy caller-supplied channel options before any setup step */
	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	/* Resolve the channel from @udmax_np/@thread_id instead of dev->of_node */
	ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &tx_chn->common, true, thread_id);
	if (ret)
		goto err;

	/* Shared setup: tchan get, chan_dev registration, rings, PSI-L pairing */
	ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
	if (ret)
		goto err;

	return tx_chn;

err:
	/*
	 * NOTE(review): this runs the full release path even when the parse
	 * step above failed before tx_chn->common.chan_dev was initialized /
	 * registered — verify k3_udma_glue_release_tx_chn() tolerates a
	 * partially initialized channel (it is out of view here).
	 */
	k3_udma_glue_release_tx_chn(tx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn_for_thread_id);
447+
398448void k3_udma_glue_release_tx_chn (struct k3_udma_glue_tx_channel * tx_chn )
399449{
400450 if (tx_chn -> psil_paired ) {
0 commit comments