Skip to content

Commit b35507a

Browse files
kot-begemot-uk authored
and richardweinberger committed
um: Migrate vector drivers to NAPI
Migrate UML vector drivers from a bespoke scheduling mechanism to NAPI. Signed-off-by: Anton Ivanov <anton.ivanov@cambridgegreys.com> Signed-off-by: Richard Weinberger <richard@nod.at>
1 parent 39508aa commit b35507a

2 files changed

Lines changed: 51 additions & 57 deletions

File tree

arch/um/drivers/vector_kern.c

Lines changed: 49 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,7 @@ static LIST_HEAD(vector_devices);
6767
static int driver_registered;
6868

6969
static void vector_eth_configure(int n, struct arglist *def);
70+
static int vector_mmsg_rx(struct vector_private *vp, int budget);
7071

7172
/* Argument accessors to set variables (and/or set default values)
7273
* mtu, buffer sizing, default headroom, etc
@@ -77,7 +78,6 @@ static void vector_eth_configure(int n, struct arglist *def);
7778
#define DEFAULT_VECTOR_SIZE 64
7879
#define TX_SMALL_PACKET 128
7980
#define MAX_IOV_SIZE (MAX_SKB_FRAGS + 1)
80-
#define MAX_ITERATIONS 64
8181

8282
static const struct {
8383
const char string[ETH_GSTRING_LEN];
@@ -458,7 +458,6 @@ static int vector_send(struct vector_queue *qi)
458458
vp->estats.tx_queue_running_average =
459459
(vp->estats.tx_queue_running_average + result) >> 1;
460460
}
461-
netif_trans_update(qi->dev);
462461
netif_wake_queue(qi->dev);
463462
/* if TX is busy, break out of the send loop,
464463
* poll write IRQ will reschedule xmit for us
@@ -470,8 +469,6 @@ static int vector_send(struct vector_queue *qi)
470469
}
471470
}
472471
spin_unlock(&qi->head_lock);
473-
} else {
474-
tasklet_schedule(&vp->tx_poll);
475472
}
476473
return queue_depth;
477474
}
@@ -608,7 +605,7 @@ static struct vector_queue *create_queue(
608605

609606
/*
610607
* We do not use the RX queue as a proper wraparound queue for now
611-
* This is not necessary because the consumption via netif_rx()
608+
* This is not necessary because the consumption via napi_gro_receive()
612609
* happens in-line. While we can try using the return code of
613610
* netif_rx() for flow control there are no drivers doing this today.
614611
* For this RX specific use we ignore the tail/head locks and
@@ -896,7 +893,7 @@ static int vector_legacy_rx(struct vector_private *vp)
896893
skb->protocol = eth_type_trans(skb, skb->dev);
897894
vp->dev->stats.rx_bytes += skb->len;
898895
vp->dev->stats.rx_packets++;
899-
netif_rx(skb);
896+
napi_gro_receive(&vp->napi, skb);
900897
} else {
901898
dev_kfree_skb_irq(skb);
902899
}
@@ -955,7 +952,7 @@ static int writev_tx(struct vector_private *vp, struct sk_buff *skb)
955952
* mmsg vector matched to an skb vector which we prepared earlier.
956953
*/
957954

958-
static int vector_mmsg_rx(struct vector_private *vp)
955+
static int vector_mmsg_rx(struct vector_private *vp, int budget)
959956
{
960957
int packet_count, i;
961958
struct vector_queue *qi = vp->rx_queue;
@@ -972,6 +969,9 @@ static int vector_mmsg_rx(struct vector_private *vp)
972969

973970
/* Fire the Lazy Gun - get as many packets as we can in one go. */
974971

972+
if (budget > qi->max_depth)
973+
budget = qi->max_depth;
974+
975975
packet_count = uml_vector_recvmmsg(
976976
vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);
977977

@@ -1021,7 +1021,7 @@ static int vector_mmsg_rx(struct vector_private *vp)
10211021
*/
10221022
vp->dev->stats.rx_bytes += skb->len;
10231023
vp->dev->stats.rx_packets++;
1024-
netif_rx(skb);
1024+
napi_gro_receive(&vp->napi, skb);
10251025
} else {
10261026
/* Overlay header too short to do anything - discard.
10271027
* We can actually keep this skb and reuse it,
@@ -1044,23 +1044,6 @@ static int vector_mmsg_rx(struct vector_private *vp)
10441044
return packet_count;
10451045
}
10461046

1047-
static void vector_rx(struct vector_private *vp)
1048-
{
1049-
int err;
1050-
int iter = 0;
1051-
1052-
if ((vp->options & VECTOR_RX) > 0)
1053-
while (((err = vector_mmsg_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
1054-
iter++;
1055-
else
1056-
while (((err = vector_legacy_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
1057-
iter++;
1058-
if ((err != 0) && net_ratelimit())
1059-
netdev_err(vp->dev, "vector_rx: error(%d)\n", err);
1060-
if (iter == MAX_ITERATIONS)
1061-
netdev_err(vp->dev, "vector_rx: device stuck, remote end may have closed the connection\n");
1062-
}
1063-
10641047
static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
10651048
{
10661049
struct vector_private *vp = netdev_priv(dev);
@@ -1085,25 +1068,15 @@ static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
10851068
netdev_sent_queue(vp->dev, skb->len);
10861069
queue_depth = vector_enqueue(vp->tx_queue, skb);
10871070

1088-
/* if the device queue is full, stop the upper layers and
1089-
* flush it.
1090-
*/
1091-
1092-
if (queue_depth >= vp->tx_queue->max_depth - 1) {
1093-
vp->estats.tx_kicks++;
1094-
netif_stop_queue(dev);
1095-
vector_send(vp->tx_queue);
1096-
return NETDEV_TX_OK;
1097-
}
1098-
if (netdev_xmit_more()) {
1071+
if (queue_depth < vp->tx_queue->max_depth && netdev_xmit_more()) {
10991072
mod_timer(&vp->tl, vp->coalesce);
11001073
return NETDEV_TX_OK;
1074+
} else {
1075+
queue_depth = vector_send(vp->tx_queue);
1076+
if (queue_depth > 0)
1077+
napi_schedule(&vp->napi);
11011078
}
1102-
if (skb->len < TX_SMALL_PACKET) {
1103-
vp->estats.tx_kicks++;
1104-
vector_send(vp->tx_queue);
1105-
} else
1106-
tasklet_schedule(&vp->tx_poll);
1079+
11071080
return NETDEV_TX_OK;
11081081
}
11091082

@@ -1114,7 +1087,7 @@ static irqreturn_t vector_rx_interrupt(int irq, void *dev_id)
11141087

11151088
if (!netif_running(dev))
11161089
return IRQ_NONE;
1117-
vector_rx(vp);
1090+
napi_schedule(&vp->napi);
11181091
return IRQ_HANDLED;
11191092

11201093
}
@@ -1133,8 +1106,7 @@ static irqreturn_t vector_tx_interrupt(int irq, void *dev_id)
11331106
* tweaking the IRQ mask less costly
11341107
*/
11351108

1136-
if (vp->in_write_poll)
1137-
tasklet_schedule(&vp->tx_poll);
1109+
napi_schedule(&vp->napi);
11381110
return IRQ_HANDLED;
11391111

11401112
}
@@ -1161,7 +1133,8 @@ static int vector_net_close(struct net_device *dev)
11611133
um_free_irq(vp->tx_irq, dev);
11621134
vp->tx_irq = 0;
11631135
}
1164-
tasklet_kill(&vp->tx_poll);
1136+
napi_disable(&vp->napi);
1137+
netif_napi_del(&vp->napi);
11651138
if (vp->fds->rx_fd > 0) {
11661139
if (vp->bpf)
11671140
uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
@@ -1193,15 +1166,32 @@ static int vector_net_close(struct net_device *dev)
11931166
return 0;
11941167
}
11951168

1196-
/* TX tasklet */
1197-
1198-
static void vector_tx_poll(struct tasklet_struct *t)
1169+
static int vector_poll(struct napi_struct *napi, int budget)
11991170
{
1200-
struct vector_private *vp = from_tasklet(vp, t, tx_poll);
1171+
struct vector_private *vp = container_of(napi, struct vector_private, napi);
1172+
int work_done = 0;
1173+
int err;
1174+
bool tx_enqueued = false;
12011175

1202-
vp->estats.tx_kicks++;
1203-
vector_send(vp->tx_queue);
1176+
if ((vp->options & VECTOR_TX) != 0)
1177+
tx_enqueued = (vector_send(vp->tx_queue) > 0);
1178+
if ((vp->options & VECTOR_RX) > 0)
1179+
err = vector_mmsg_rx(vp, budget);
1180+
else {
1181+
err = vector_legacy_rx(vp);
1182+
if (err > 0)
1183+
err = 1;
1184+
}
1185+
if (err > 0)
1186+
work_done += err;
1187+
1188+
if (tx_enqueued || err > 0)
1189+
napi_schedule(napi);
1190+
if (work_done < budget)
1191+
napi_complete_done(napi, work_done);
1192+
return work_done;
12041193
}
1194+
12051195
static void vector_reset_tx(struct work_struct *work)
12061196
{
12071197
struct vector_private *vp =
@@ -1265,6 +1255,9 @@ static int vector_net_open(struct net_device *dev)
12651255
goto out_close;
12661256
}
12671257

1258+
netif_napi_add(vp->dev, &vp->napi, vector_poll, get_depth(vp->parsed));
1259+
napi_enable(&vp->napi);
1260+
12681261
/* READ IRQ */
12691262
err = um_request_irq(
12701263
irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd,
@@ -1306,15 +1299,15 @@ static int vector_net_open(struct net_device *dev)
13061299
uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
13071300

13081301
netif_start_queue(dev);
1302+
vector_reset_stats(vp);
13091303

13101304
/* clear buffer - it can happen that the host side of the interface
13111305
* is full when we get here. In this case, new data is never queued,
13121306
* SIGIOs never arrive, and the net never works.
13131307
*/
13141308

1315-
vector_rx(vp);
1309+
napi_schedule(&vp->napi);
13161310

1317-
vector_reset_stats(vp);
13181311
vdevice = find_device(vp->unit);
13191312
vdevice->opened = 1;
13201313

@@ -1543,15 +1536,16 @@ static const struct net_device_ops vector_netdev_ops = {
15431536
#endif
15441537
};
15451538

1546-
15471539
static void vector_timer_expire(struct timer_list *t)
15481540
{
15491541
struct vector_private *vp = from_timer(vp, t, tl);
15501542

15511543
vp->estats.tx_kicks++;
1552-
vector_send(vp->tx_queue);
1544+
napi_schedule(&vp->napi);
15531545
}
15541546

1547+
1548+
15551549
static void vector_eth_configure(
15561550
int n,
15571551
struct arglist *def
@@ -1634,7 +1628,6 @@ static void vector_eth_configure(
16341628
});
16351629

16361630
dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
1637-
tasklet_setup(&vp->tx_poll, vector_tx_poll);
16381631
INIT_WORK(&vp->reset_tx, vector_reset_tx);
16391632

16401633
timer_setup(&vp->tl, vector_timer_expire, 0);

arch/um/drivers/vector_kern.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
#include <linux/ctype.h>
1515
#include <linux/workqueue.h>
1616
#include <linux/interrupt.h>
17+
1718
#include "vector_user.h"
1819

1920
/* Queue structure specially adapted for multiple enqueue/dequeue
@@ -72,6 +73,7 @@ struct vector_private {
7273
struct list_head list;
7374
spinlock_t lock;
7475
struct net_device *dev;
76+
struct napi_struct napi ____cacheline_aligned;
7577

7678
int unit;
7779

@@ -115,7 +117,6 @@ struct vector_private {
115117

116118
spinlock_t stats_lock;
117119

118-
struct tasklet_struct tx_poll;
119120
bool rexmit_scheduled;
120121
bool opened;
121122
bool in_write_poll;

0 commit comments

Comments
 (0)