// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"
#include "dma.h"
10- #if IS_ENABLED (CONFIG_NET_MEDIATEK_SOC_WED )
11-
12- #define Q_READ (_q , _field ) ({ \
13- u32 _offset = offsetof(struct mt76_queue_regs, _field); \
14- u32 _val; \
15- if ((_q)->flags & MT_QFLAG_WED) \
16- _val = mtk_wed_device_reg_read((_q)->wed, \
17- ((_q)->wed_regs + \
18- _offset)); \
19- else \
20- _val = readl(&(_q)->regs->_field); \
21- _val; \
22- })
23-
24- #define Q_WRITE (_q , _field , _val ) do { \
25- u32 _offset = offsetof(struct mt76_queue_regs, _field); \
26- if ((_q)->flags & MT_QFLAG_WED) \
27- mtk_wed_device_reg_write((_q)->wed, \
28- ((_q)->wed_regs + _offset), \
29- _val); \
30- else \
31- writel(_val, &(_q)->regs->_field); \
32- } while (0)
33-
34- #else
35-
36- #define Q_READ (_q , _field ) readl(&(_q)->regs->_field)
37- #define Q_WRITE (_q , _field , _val ) writel(_val, &(_q)->regs->_field)
38-
39- #endif
40-
4110static struct mt76_txwi_cache *
4211mt76_alloc_txwi (struct mt76_dev * dev )
4312{
@@ -220,10 +189,15 @@ static void
220189mt76_dma_sync_idx (struct mt76_dev * dev , struct mt76_queue * q )
221190{
222191 Q_WRITE (q , desc_base , q -> desc_dma );
223- if (q -> flags & MT_QFLAG_WED_RRO_EN )
192+ if (( q -> flags & MT_QFLAG_WED_RRO_EN ) && ! mt76_npu_device_active ( dev ) )
224193 Q_WRITE (q , ring_size , MT_DMA_RRO_EN | q -> ndesc );
225194 else
226195 Q_WRITE (q , ring_size , q -> ndesc );
196+
197+ if (mt76_queue_is_npu_tx (q )) {
198+ writel (q -> desc_dma , & q -> regs -> desc_base );
199+ writel (q -> ndesc , & q -> regs -> ring_size );
200+ }
227201 q -> head = Q_READ (q , dma_idx );
228202 q -> tail = q -> head ;
229203}
@@ -235,7 +209,7 @@ void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
235209 return ;
236210
237211 if (!mt76_queue_is_wed_rro_ind (q ) &&
238- !mt76_queue_is_wed_rro_rxdmad_c (q )) {
212+ !mt76_queue_is_wed_rro_rxdmad_c (q ) && ! mt76_queue_is_npu ( q ) ) {
239213 int i ;
240214
241215 /* clear descriptors */
@@ -446,6 +420,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
446420
447421 while (q -> queued > 0 && q -> tail != last ) {
448422 mt76_dma_tx_cleanup_idx (dev , q , q -> tail , & entry );
423+ mt76_npu_txdesc_cleanup (q , q -> tail );
449424 mt76_queue_tx_complete (dev , q , & entry );
450425
451426 if (entry .txwi ) {
@@ -680,6 +655,10 @@ mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
680655 if (test_bit (MT76_RESET , & phy -> state ))
681656 goto free_skb ;
682657
658+ /* TODO: Take into account unlinear skbs */
659+ if (mt76_npu_device_active (dev ) && skb_linearize (skb ))
660+ goto free_skb ;
661+
683662 t = mt76_get_txwi (dev );
684663 if (!t )
685664 goto free_skb ;
@@ -727,6 +706,9 @@ mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
727706 if (ret < 0 )
728707 goto unmap ;
729708
709+ if (mt76_npu_device_active (dev ))
710+ return mt76_npu_dma_add_buf (phy , q , skb , & tx_info .buf [1 ], txwi );
711+
730712 return mt76_dma_add_buf (dev , q , tx_info .buf , tx_info .nbuf ,
731713 tx_info .info , tx_info .skb , t );
732714
@@ -825,9 +807,17 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
825807 q -> ndesc = n_desc ;
826808 q -> buf_size = bufsize ;
827809 q -> hw_idx = idx ;
810+ q -> dev = dev ;
811+
812+ if (mt76_queue_is_wed_rro_ind (q ))
813+ size = sizeof (struct mt76_wed_rro_desc );
814+ else if (mt76_queue_is_npu_tx (q ))
815+ size = sizeof (struct airoha_npu_tx_dma_desc );
816+ else if (mt76_queue_is_npu_rx (q ))
817+ size = sizeof (struct airoha_npu_rx_dma_desc );
818+ else
819+ size = sizeof (struct mt76_desc );
828820
829- size = mt76_queue_is_wed_rro_ind (q ) ? sizeof (struct mt76_wed_rro_desc )
830- : sizeof (struct mt76_desc );
831821 q -> desc = dmam_alloc_coherent (dev -> dma_dev , q -> ndesc * size ,
832822 & q -> desc_dma , GFP_KERNEL );
833823 if (!q -> desc )
@@ -843,6 +833,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
843833 if (ret )
844834 return ret ;
845835
836+ mt76_npu_queue_setup (dev , q );
846837 ret = mt76_wed_dma_setup (dev , q , false);
847838 if (ret )
848839 return ret ;
@@ -870,6 +861,11 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
870861 if (!q -> ndesc )
871862 return ;
872863
864+ if (mt76_queue_is_npu (q )) {
865+ mt76_npu_queue_cleanup (dev , q );
866+ return ;
867+ }
868+
873869 do {
874870 spin_lock_bh (& q -> lock );
875871 buf = mt76_dma_dequeue (dev , q , true, NULL , NULL , & more , NULL );
@@ -900,7 +896,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
900896 return ;
901897
902898 if (!mt76_queue_is_wed_rro_ind (q ) &&
903- !mt76_queue_is_wed_rro_rxdmad_c (q )) {
899+ !mt76_queue_is_wed_rro_rxdmad_c (q ) && ! mt76_queue_is_npu ( q ) ) {
904900 int i ;
905901
906902 for (i = 0 ; i < q -> ndesc ; i ++ )
@@ -920,7 +916,10 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
920916 return ;
921917
922918 mt76_dma_sync_idx (dev , q );
923- mt76_dma_rx_fill_buf (dev , q , false);
919+ if (mt76_queue_is_npu (q ))
920+ mt76_npu_fill_rx_queue (dev , q );
921+ else
922+ mt76_dma_rx_fill (dev , q , false);
924923}
925924
926925static void
0 commit comments