
Commit 59b3f94

Merge branch 'xsa' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Merge xen fixes from Juergen Gross:
 "Fixes for two issues related to Xen and malicious guests:

   - Guest can force the netback driver to hog large amounts of memory

   - Denial of Service in other guests due to event storms"

* 'xsa' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/netback: don't queue unlimited number of packages
  xen/netback: fix rx queue stall detection
  xen/console: harden hvc_xen against event channel storms
  xen/netfront: harden netfront against event channel storms
  xen/blkfront: harden blkfront against event channel storms
2 parents a7904a5 + be81992 commit 59b3f94

7 files changed: 190 additions & 65 deletions

drivers/block/xen-blkfront.c

Lines changed: 12 additions & 3 deletions
@@ -1512,9 +1512,12 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 	unsigned long flags;
 	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
 	struct blkfront_info *info = rinfo->dev_info;
+	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
 
-	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
+	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
+		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
 		return IRQ_HANDLED;
+	}
 
 	spin_lock_irqsave(&rinfo->ring_lock, flags);
  again:
@@ -1530,6 +1533,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 		unsigned long id;
 		unsigned int op;
 
+		eoiflag = 0;
+
 		RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
 		id = bret.id;
 
@@ -1646,13 +1651,17 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
 
+	xen_irq_lateeoi(irq, eoiflag);
+
 	return IRQ_HANDLED;
 
  err:
 	info->connected = BLKIF_STATE_ERROR;
 
 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
 
+	/* No EOI in order to avoid further interrupts. */
+
 	pr_alert("%s disabled for further use\n", info->gd->disk_name);
 	return IRQ_HANDLED;
 }
@@ -1692,8 +1701,8 @@ static int setup_blkring(struct xenbus_device *dev,
 	if (err)
 		goto fail;
 
-	err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
-					"blkif", rinfo);
+	err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
+						0, "blkif", rinfo);
 	if (err <= 0) {
 		xenbus_dev_fatal(dev, err,
 				 "bind_evtchn_to_irqhandler failed");

drivers/net/xen-netback/common.h

Lines changed: 1 addition & 0 deletions
@@ -203,6 +203,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 	unsigned int rx_queue_max;
 	unsigned int rx_queue_len;
 	unsigned long last_rx_time;
+	unsigned int rx_slots_needed;
 	bool stalled;
 
 	struct xenvif_copy_state rx_copy;
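
The new rx_slots_needed field caches how many ring slots the SKB at the head of the rx queue will consume. It is written under the rx_queue lock but read locklessly by the RX thread, which is why every access in the rx.c changes below goes through WRITE_ONCE()/READ_ONCE(). A minimal kernel-style sketch of that locked-writer/lockless-reader idiom for a single word (the demo_* names are illustrative, not from the patch):

struct demo_queue {
	spinlock_t lock;
	unsigned int slots_needed;	/* written under lock, read without */
};

static void demo_set_needed(struct demo_queue *q, unsigned int n)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	/* WRITE_ONCE() keeps the compiler from tearing or eliding the store. */
	WRITE_ONCE(q->slots_needed, n);
	spin_unlock_irqrestore(&q->lock, flags);
}

static bool demo_enough_room(struct demo_queue *q, unsigned int free_slots)
{
	/*
	 * READ_ONCE() pairs with the writer above; a single aligned word
	 * is read atomically, so no lock is needed on this path.
	 */
	return free_slots >= READ_ONCE(q->slots_needed);
}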

drivers/net/xen-netback/rx.c

Lines changed: 49 additions & 28 deletions
@@ -33,28 +33,36 @@
 #include <xen/xen.h>
 #include <xen/events.h>
 
-static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
+/*
+ * Update the needed ring page slots for the first SKB queued.
+ * Note that any call sequence outside the RX thread calling this function
+ * needs to wake up the RX thread via a call of xenvif_kick_thread()
+ * afterwards in order to avoid a race with putting the thread to sleep.
+ */
+static void xenvif_update_needed_slots(struct xenvif_queue *queue,
+				       const struct sk_buff *skb)
 {
-	RING_IDX prod, cons;
-	struct sk_buff *skb;
-	int needed;
-	unsigned long flags;
-
-	spin_lock_irqsave(&queue->rx_queue.lock, flags);
+	unsigned int needed = 0;
 
-	skb = skb_peek(&queue->rx_queue);
-	if (!skb) {
-		spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
-		return false;
+	if (skb) {
+		needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
+		if (skb_is_gso(skb))
+			needed++;
+		if (skb->sw_hash)
+			needed++;
 	}
 
-	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
-	if (skb_is_gso(skb))
-		needed++;
-	if (skb->sw_hash)
-		needed++;
+	WRITE_ONCE(queue->rx_slots_needed, needed);
+}
 
-	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
+{
+	RING_IDX prod, cons;
+	unsigned int needed;
+
+	needed = READ_ONCE(queue->rx_slots_needed);
+	if (!needed)
+		return false;
 
 	do {
 		prod = queue->rx.sring->req_prod;
@@ -80,13 +88,19 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
 
 	spin_lock_irqsave(&queue->rx_queue.lock, flags);
 
-	__skb_queue_tail(&queue->rx_queue, skb);
-
-	queue->rx_queue_len += skb->len;
-	if (queue->rx_queue_len > queue->rx_queue_max) {
+	if (queue->rx_queue_len >= queue->rx_queue_max) {
 		struct net_device *dev = queue->vif->dev;
 
 		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+		kfree_skb(skb);
+		queue->vif->dev->stats.rx_dropped++;
+	} else {
+		if (skb_queue_empty(&queue->rx_queue))
+			xenvif_update_needed_slots(queue, skb);
+
+		__skb_queue_tail(&queue->rx_queue, skb);
+
+		queue->rx_queue_len += skb->len;
 	}
 
 	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
@@ -100,6 +114,8 @@ static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
 
 	skb = __skb_dequeue(&queue->rx_queue);
 	if (skb) {
+		xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));
+
 		queue->rx_queue_len -= skb->len;
 		if (queue->rx_queue_len < queue->rx_queue_max) {
 			struct netdev_queue *txq;
@@ -134,6 +150,7 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
 			break;
 		xenvif_rx_dequeue(queue);
 		kfree_skb(skb);
+		queue->vif->dev->stats.rx_dropped++;
 	}
 }
 
@@ -487,27 +504,31 @@ void xenvif_rx_action(struct xenvif_queue *queue)
 	xenvif_rx_copy_flush(queue);
 }
 
-static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
+static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
 {
 	RING_IDX prod, cons;
 
 	prod = queue->rx.sring->req_prod;
 	cons = queue->rx.req_cons;
 
+	return prod - cons;
+}
+
+static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
+{
+	unsigned int needed = READ_ONCE(queue->rx_slots_needed);
+
 	return !queue->stalled &&
-	       prod - cons < 1 &&
+	       xenvif_rx_queue_slots(queue) < needed &&
 	       time_after(jiffies,
 			  queue->last_rx_time + queue->vif->stall_timeout);
 }
 
 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
 {
-	RING_IDX prod, cons;
-
-	prod = queue->rx.sring->req_prod;
-	cons = queue->rx.req_cons;
+	unsigned int needed = READ_ONCE(queue->rx_slots_needed);
 
-	return queue->stalled && prod - cons >= 1;
+	return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
 }
 
 bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
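
Two behavioural changes are visible in this file. First, xenvif_rx_queue_tail() now drops the packet (and bumps rx_dropped) once rx_queue_len has reached rx_queue_max, instead of queueing without bound; this is what stops a guest that never consumes its rx ring from forcing netback to hog host memory. Second, stall detection no longer treats a single free ring slot as progress: the head SKB needs one slot per XEN_PAGE_SIZE chunk, plus one each for GSO metadata and a hash extra, so the old "prod - cons < 1" test could declare a queue ready that still could not deliver its head packet. A condensed sketch of the new readiness test (simplified from the hunks above; locking and the kthread wakeup are omitted, and demo_queue_can_progress is an illustrative name, not from the patch):

/* Sketch: can the frontend's rx ring take the SKB at the queue head? */
static bool demo_queue_can_progress(const struct xenvif_queue *queue)
{
	/* Slots the head SKB needs, maintained by the enqueue/dequeue paths. */
	unsigned int needed = READ_ONCE(queue->rx_slots_needed);

	/* Requests the frontend has posted that we have not yet consumed. */
	RING_IDX free = queue->rx.sring->req_prod - queue->rx.req_cons;

	/* An empty queue (needed == 0) trivially passes. */
	return free >= needed;
}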
