Skip to content

Commit 2ea0a28

Browse files
Gerhard Engleder authored and kuba-moo committed
tsnep: Add functions for queue enable/disable
Move queue enable and disable code to separate functions. This way the activation and deactivation of the queues are defined actions, which can be used in future execution paths. These functions will be used for the queue reconfiguration at runtime, which is necessary for XSK zero-copy support. Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com> Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com> Signed-off-by: Jakub Kicinski <kuba@kernel.org>
1 parent 33b0ee0 commit 2ea0a28

1 file changed

Lines changed: 64 additions & 33 deletions

File tree

drivers/net/ethernet/engleder/tsnep_main.c

Lines changed: 64 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -866,6 +866,24 @@ static void tsnep_rx_init(struct tsnep_rx *rx)
866866
rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
867867
}
868868

869+
/* Notify the hardware that RX descriptors are ready to be processed.
 *
 * Callers fill in descriptor properties beforehand; the DMA write barrier
 * makes those stores visible to the device before the control register
 * write below triggers RX processing.
 */
static void tsnep_rx_enable(struct tsnep_rx *rx)
{
	/* descriptor properties shall be valid before hardware is notified */
	dma_wmb();

	iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
}
876+
877+
/* Stop RX processing for this queue and wait until it has quiesced.
 *
 * Writes the disable command and then polls the control register until the
 * hardware clears TSNEP_CONTROL_RX_ENABLE (poll interval 10 ms, timeout
 * 1 s per readx_poll_timeout() semantics; the poll result is ignored, so a
 * timeout is not reported to the caller).
 */
static void tsnep_rx_disable(struct tsnep_rx *rx)
{
	u32 val;

	iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
	readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
			   1000000);
}
886+
869887
static int tsnep_rx_desc_available(struct tsnep_rx *rx)
870888
{
871889
if (rx->read <= rx->write)
@@ -932,19 +950,15 @@ static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
932950
entry->desc->properties = __cpu_to_le32(entry->properties);
933951
}
934952

935-
static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
953+
static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse)
936954
{
937-
int index;
938955
bool alloc_failed = false;
939-
bool enable = false;
940-
int i;
941-
int retval;
956+
int i, index;
942957

943958
for (i = 0; i < count && !alloc_failed; i++) {
944959
index = (rx->write + i) & TSNEP_RING_MASK;
945960

946-
retval = tsnep_rx_alloc_buffer(rx, index);
947-
if (unlikely(retval)) {
961+
if (unlikely(tsnep_rx_alloc_buffer(rx, index))) {
948962
rx->alloc_failed++;
949963
alloc_failed = true;
950964

@@ -956,22 +970,23 @@ static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
956970
}
957971

958972
tsnep_rx_activate(rx, index);
959-
960-
enable = true;
961973
}
962974

963-
if (enable) {
975+
if (i)
964976
rx->write = (rx->write + i) & TSNEP_RING_MASK;
965977

966-
/* descriptor properties shall be valid before hardware is
967-
* notified
968-
*/
969-
dma_wmb();
978+
return i;
979+
}
970980

971-
iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
972-
}
981+
/* Refill the RX ring with up to @count buffers and, if any descriptor was
 * actually made available, notify the hardware.
 *
 * Returns the number of descriptors refilled (may be less than @count on
 * allocation failure).
 */
static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
{
	int refilled = tsnep_rx_alloc(rx, count, reuse);

	/* ring the doorbell only when new descriptors were activated */
	if (refilled)
		tsnep_rx_enable(rx);

	return refilled;
}
976991

977992
static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
@@ -1199,6 +1214,7 @@ static bool tsnep_rx_pending(struct tsnep_rx *rx)
11991214

12001215
static int tsnep_rx_open(struct tsnep_rx *rx)
12011216
{
1217+
int desc_available;
12021218
int retval;
12031219

12041220
retval = tsnep_rx_ring_create(rx);
@@ -1207,20 +1223,19 @@ static int tsnep_rx_open(struct tsnep_rx *rx)
12071223

12081224
tsnep_rx_init(rx);
12091225

1210-
tsnep_rx_refill(rx, tsnep_rx_desc_available(rx), false);
1226+
desc_available = tsnep_rx_desc_available(rx);
1227+
retval = tsnep_rx_alloc(rx, desc_available, false);
1228+
if (retval != desc_available) {
1229+
tsnep_rx_ring_cleanup(rx);
1230+
1231+
return -ENOMEM;
1232+
}
12111233

12121234
return 0;
12131235
}
12141236

12151237
/* Release RX ring resources on queue close.
 *
 * Only ring cleanup happens here; stopping the RX hardware is handled
 * separately by tsnep_rx_disable() (called via tsnep_queue_disable()), so
 * the disable sequence can also be reused for runtime reconfiguration.
 */
static void tsnep_rx_close(struct tsnep_rx *rx)
{
	tsnep_rx_ring_cleanup(rx);
}
12261241

@@ -1377,6 +1392,27 @@ static int tsnep_queue_open(struct tsnep_adapter *adapter,
13771392
return retval;
13781393
}
13791394

1395+
/* Activate a queue: enable NAPI polling and the queue's interrupt, then
 * start RX hardware processing.  queue->rx may be NULL (queue without an
 * RX ring), in which case only NAPI and the interrupt are enabled.
 */
static void tsnep_queue_enable(struct tsnep_queue *queue)
{
	napi_enable(&queue->napi);
	tsnep_enable_irq(queue->adapter, queue->irq_mask);

	if (queue->rx)
		tsnep_rx_enable(queue->rx);
}
1403+
1404+
/* Deactivate a queue: stop NAPI polling, mask the queue's interrupt, then
 * quiesce RX hardware.  Mirror image of tsnep_queue_enable(); queue->rx
 * may be NULL for queues without an RX ring.
 */
static void tsnep_queue_disable(struct tsnep_queue *queue)
{
	napi_disable(&queue->napi);
	tsnep_disable_irq(queue->adapter, queue->irq_mask);

	/* disable RX after NAPI polling has been disabled, because RX can be
	 * enabled during NAPI polling
	 */
	if (queue->rx)
		tsnep_rx_disable(queue->rx);
}
1415+
13801416
static int tsnep_netdev_open(struct net_device *netdev)
13811417
{
13821418
struct tsnep_adapter *adapter = netdev_priv(netdev);
@@ -1413,11 +1449,8 @@ static int tsnep_netdev_open(struct net_device *netdev)
14131449
if (retval)
14141450
goto phy_failed;
14151451

1416-
for (i = 0; i < adapter->num_queues; i++) {
1417-
napi_enable(&adapter->queue[i].napi);
1418-
1419-
tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
1420-
}
1452+
for (i = 0; i < adapter->num_queues; i++)
1453+
tsnep_queue_enable(&adapter->queue[i]);
14211454

14221455
return 0;
14231456

@@ -1444,9 +1477,7 @@ static int tsnep_netdev_close(struct net_device *netdev)
14441477
tsnep_phy_close(adapter);
14451478

14461479
for (i = 0; i < adapter->num_queues; i++) {
1447-
tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
1448-
1449-
napi_disable(&adapter->queue[i].napi);
1480+
tsnep_queue_disable(&adapter->queue[i]);
14501481

14511482
tsnep_queue_close(&adapter->queue[i], i == 0);
14521483

0 commit comments

Comments
 (0)