Commit b27d479

xen/netfront: harden netfront against event channel storms
The Xen netfront driver is still vulnerable to an attack via an
excessive number of events sent by the backend. Fix that by using
lateeoi event channels.

To be able to detect the case of no rx responses being added while the
carrier is down, a new lock is needed in order to update and test
rsp_cons and the number of unconsumed responses seen so far atomically.

This is part of XSA-391

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
V2:
- don't eoi irq in case of interface set broken (Jan Beulich)
- handle carrier off + no new responses added (Jan Beulich)
V3:
- add rx_ prefix to rsp_unconsumed (Jan Beulich)
- correct xennet_set_rx_rsp_cons() spelling (Jan Beulich)
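For context, here is a minimal sketch of the lateeoi pattern the patch adopts (illustrative only: my_interrupt, my_handle_events, my_bind and "my-dev" are hypothetical stand-ins; the Xen APIs are the ones used in the diff below). With a lateeoi event channel the EOI is no longer implied by returning from the interrupt handler; the driver issues it explicitly and flags events that brought no work as spurious, which lets the event channel core throttle a backend that storms the frontend:

    #include <linux/interrupt.h>
    #include <xen/events.h>

    static bool my_handle_events(void *dev_id)
    {
            /* Hypothetical: drain the shared ring; true if it had work. */
            return false;
    }

    static irqreturn_t my_interrupt(int irq, void *dev_id)
    {
            unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

            if (my_handle_events(dev_id))
                    eoiflag = 0;            /* productive event: normal EOI */

            xen_irq_lateeoi(irq, eoiflag);  /* explicit, possibly delayed EOI */
            return IRQ_HANDLED;
    }

    static int my_bind(evtchn_port_t evtchn, void *dev_id)
    {
            /* lateeoi variant of bind_evtchn_to_irqhandler() */
            return bind_evtchn_to_irqhandler_lateeoi(evtchn, my_interrupt, 0,
                                                     "my-dev", dev_id);
    }

The handlers in the patch go one step further: once the interface has been marked broken they return early and skip xen_irq_lateeoi() entirely, so the offending channel is never unmasked again (see the V2 note above).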
1 parent 0fd08a3 commit b27d479

1 file changed: drivers/net/xen-netfront.c
Lines changed: 94 additions & 31 deletions
@@ -148,6 +148,9 @@ struct netfront_queue {
         grant_ref_t gref_rx_head;
         grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 
+        unsigned int rx_rsp_unconsumed;
+        spinlock_t rx_cons_lock;
+
         struct page_pool *page_pool;
         struct xdp_rxq_info xdp_rxq;
 };
@@ -376,12 +379,13 @@ static int xennet_open(struct net_device *dev)
         return 0;
 }
 
-static void xennet_tx_buf_gc(struct netfront_queue *queue)
+static bool xennet_tx_buf_gc(struct netfront_queue *queue)
 {
         RING_IDX cons, prod;
         unsigned short id;
         struct sk_buff *skb;
         bool more_to_do;
+        bool work_done = false;
         const struct device *dev = &queue->info->netdev->dev;
 
         BUG_ON(!netif_carrier_ok(queue->info->netdev));
@@ -398,6 +402,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
         for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
                 struct xen_netif_tx_response txrsp;
 
+                work_done = true;
+
                 RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
                 if (txrsp.status == XEN_NETIF_RSP_NULL)
                         continue;
@@ -441,11 +447,13 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 
         xennet_maybe_wake_tx(queue);
 
-        return;
+        return work_done;
 
  err:
         queue->info->broken = true;
         dev_alert(dev, "Disabled for further use\n");
+
+        return work_done;
 }
 
 struct xennet_gnttab_make_txreq {
@@ -834,6 +842,16 @@ static int xennet_close(struct net_device *dev)
         return 0;
 }
 
+static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&queue->rx_cons_lock, flags);
+        queue->rx.rsp_cons = val;
+        queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+        spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
+}
+
 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
                                 grant_ref_t ref)
 {
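Why the helper takes rx_cons_lock: xennet_handle_rx() (later in this diff) compares the ring's current unconsumed count against rx_rsp_unconsumed to decide whether an event was productive, and treats a shrinking count as a backend error. A sketch of one interleaving the lock rules out (hypothetical timing, simplified):

    /*
     * NAPI context                    rx interrupt (xennet_handle_rx)
     * ------------                    -------------------------------
     * queue->rx.rsp_cons = val;
     *                                 work = RING_HAS_UNCONSUMED_RESPONSES(rx);
     *                                 work is now < rx_rsp_unconsumed, so the
     *                                 device would wrongly be marked broken.
     * queue->rx_rsp_unconsumed =
     *         RING_HAS_UNCONSUMED_RESPONSES(rx);
     */

With both fields updated under the lock, the interrupt handler always sees a consistent pair.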
@@ -885,7 +903,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
                 xennet_move_rx_slot(queue, skb, ref);
         } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 
-        queue->rx.rsp_cons = cons;
+        xennet_set_rx_rsp_cons(queue, cons);
         return err;
 }
 
@@ -1039,7 +1057,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
         }
 
         if (unlikely(err))
-                queue->rx.rsp_cons = cons + slots;
+                xennet_set_rx_rsp_cons(queue, cons + slots);
 
         return err;
 }
@@ -1093,7 +1111,8 @@ static int xennet_fill_frags(struct netfront_queue *queue,
                         __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                 }
                 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
-                        queue->rx.rsp_cons = ++cons + skb_queue_len(list);
+                        xennet_set_rx_rsp_cons(queue,
+                                               ++cons + skb_queue_len(list));
                         kfree_skb(nskb);
                         return -ENOENT;
                 }
@@ -1106,7 +1125,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
                 kfree_skb(nskb);
         }
 
-        queue->rx.rsp_cons = cons;
+        xennet_set_rx_rsp_cons(queue, cons);
 
         return 0;
 }
@@ -1229,7 +1248,9 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 
                         if (unlikely(xennet_set_skb_gso(skb, gso))) {
                                 __skb_queue_head(&tmpq, skb);
-                                queue->rx.rsp_cons += skb_queue_len(&tmpq);
+                                xennet_set_rx_rsp_cons(queue,
+                                                       queue->rx.rsp_cons +
+                                                       skb_queue_len(&tmpq));
                                 goto err;
                         }
                 }
@@ -1253,7 +1274,8 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 
                 __skb_queue_tail(&rxq, skb);
 
-                i = ++queue->rx.rsp_cons;
+                i = queue->rx.rsp_cons + 1;
+                xennet_set_rx_rsp_cons(queue, i);
                 work_done++;
         }
         if (need_xdp_flush)
@@ -1417,40 +1439,79 @@ static int xennet_set_features(struct net_device *dev,
         return 0;
 }
 
-static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
+static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
 {
-        struct netfront_queue *queue = dev_id;
         unsigned long flags;
 
-        if (queue->info->broken)
-                return IRQ_HANDLED;
+        if (unlikely(queue->info->broken))
+                return false;
 
         spin_lock_irqsave(&queue->tx_lock, flags);
-        xennet_tx_buf_gc(queue);
+        if (xennet_tx_buf_gc(queue))
+                *eoi = 0;
         spin_unlock_irqrestore(&queue->tx_lock, flags);
 
+        return true;
+}
+
+static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
+{
+        unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+        if (likely(xennet_handle_tx(dev_id, &eoiflag)))
+                xen_irq_lateeoi(irq, eoiflag);
+
         return IRQ_HANDLED;
 }
 
-static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
+static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
 {
-        struct netfront_queue *queue = dev_id;
-        struct net_device *dev = queue->info->netdev;
+        unsigned int work_queued;
+        unsigned long flags;
 
-        if (queue->info->broken)
-                return IRQ_HANDLED;
+        if (unlikely(queue->info->broken))
+                return false;
+
+        spin_lock_irqsave(&queue->rx_cons_lock, flags);
+        work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+        if (work_queued > queue->rx_rsp_unconsumed) {
+                queue->rx_rsp_unconsumed = work_queued;
+                *eoi = 0;
+        } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
+                const struct device *dev = &queue->info->netdev->dev;
+
+                spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
+                dev_alert(dev, "RX producer index going backwards\n");
+                dev_alert(dev, "Disabled for further use\n");
+                queue->info->broken = true;
+                return false;
+        }
+        spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
 
-        if (likely(netif_carrier_ok(dev) &&
-                   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
+        if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
                 napi_schedule(&queue->napi);
 
+        return true;
+}
+
+static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
+{
+        unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+        if (likely(xennet_handle_rx(dev_id, &eoiflag)))
+                xen_irq_lateeoi(irq, eoiflag);
+
         return IRQ_HANDLED;
 }
 
 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
 {
-        xennet_tx_interrupt(irq, dev_id);
-        xennet_rx_interrupt(irq, dev_id);
+        unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+        if (xennet_handle_tx(dev_id, &eoiflag) &&
+            xennet_handle_rx(dev_id, &eoiflag))
+                xen_irq_lateeoi(irq, eoiflag);
+
         return IRQ_HANDLED;
 }
 
@@ -1768,9 +1829,10 @@ static int setup_netfront_single(struct netfront_queue *queue)
         if (err < 0)
                 goto fail;
 
-        err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
-                                        xennet_interrupt,
-                                        0, queue->info->netdev->name, queue);
+        err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
+                                                xennet_interrupt, 0,
+                                                queue->info->netdev->name,
+                                                queue);
         if (err < 0)
                 goto bind_fail;
         queue->rx_evtchn = queue->tx_evtchn;
@@ -1798,18 +1860,18 @@ static int setup_netfront_split(struct netfront_queue *queue)
 
         snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
                  "%s-tx", queue->name);
-        err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
-                                        xennet_tx_interrupt,
-                                        0, queue->tx_irq_name, queue);
+        err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
+                                                xennet_tx_interrupt, 0,
+                                                queue->tx_irq_name, queue);
         if (err < 0)
                 goto bind_tx_fail;
         queue->tx_irq = err;
 
         snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
                  "%s-rx", queue->name);
-        err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
-                                        xennet_rx_interrupt,
-                                        0, queue->rx_irq_name, queue);
+        err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
+                                                xennet_rx_interrupt, 0,
+                                                queue->rx_irq_name, queue);
         if (err < 0)
                 goto bind_rx_fail;
         queue->rx_irq = err;
@@ -1911,6 +1973,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
 
         spin_lock_init(&queue->tx_lock);
         spin_lock_init(&queue->rx_lock);
+        spin_lock_init(&queue->rx_cons_lock);
 
         timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
 