Commit a772289

huangjie.albert authored and mstsirkin committed
virtio_ring: keep used_wrap_counter in vq->last_used_idx
The used_wrap_counter and vq->last_used_idx may get out of sync if they are assigned separately, and an interrupt might then use an incorrect value to check for the used index.

For example (an out-of-bounds access): ksoftirqd may consume the packet and will call:

virtnet_poll
	-->virtnet_receive
		-->virtqueue_get_buf_ctx
			-->virtqueue_get_buf_ctx_packed

and in virtqueue_get_buf_ctx_packed:

vq->last_used_idx += vq->packed.desc_state[id].num;
if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
	vq->last_used_idx -= vq->packed.vring.num;
	vq->packed.used_wrap_counter ^= 1;
}

If a vring interrupt arrives at the same time, it calls:

vring_interrupt
	-->more_used
		-->more_used_packed
			-->is_used_desc_packed

In is_used_desc_packed, last_used_idx may be >= vq->packed.vring.num, so this can cause a memory out-of-bounds bug.

This patch keeps the used_wrap_counter in vq->last_used_idx so the interrupt path reads a consistent value when checking the used index.

v3->v4:
- use READ_ONCE/WRITE_ONCE to get/set vq->last_used_idx

v2->v3:
- add inline functions to get used_wrap_counter and last_used
- when using vq->last_used_idx, only read it once; if vq->last_used_idx is read twice, the values can be inconsistent
- use last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR)) to get all bits below VRING_PACKED_EVENT_F_WRAP_CTR

v1->v2:
- reuse VRING_PACKED_EVENT_F_WRAP_CTR
- remove the parameter check in is_used_desc_packed, because the value cannot be illegal

Signed-off-by: huangjie.albert <huangjie.albert@bytedance.com>
Message-Id: <20220617020411.80367-1-huangjie.albert@bytedance.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
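For illustration, here is a minimal user-space sketch (not the kernel code itself) of the scheme the patch adopts: the used wrap counter lives in the high bit of the 16-bit last_used_idx (bit VRING_PACKED_EVENT_F_WRAP_CTR, assumed here to be 15 as in include/uapi/linux/virtio_ring.h), so a single load observes index and counter together and a single store publishes both. The helper names mirror packed_used_wrap_counter()/packed_last_used() from the diff below; the ring size and descriptor count are made-up values.

/* Sketch of packing the used wrap counter into last_used_idx. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VRING_PACKED_EVENT_F_WRAP_CTR 15	/* assumed, matches the uapi header */

static inline bool packed_used_wrap_counter(uint16_t last_used_idx)
{
	return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}

static inline uint16_t packed_last_used(uint16_t last_used_idx)
{
	/* keep all bits below VRING_PACKED_EVENT_F_WRAP_CTR */
	return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}

int main(void)
{
	/* Initial value: index 0 with the wrap counter set, as in the patch. */
	uint16_t last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
	uint16_t num = 256;	/* hypothetical ring size */
	uint16_t consumed = 300;	/* hypothetical descriptors consumed */

	/* Advance the index and flip the counter in local copies... */
	uint16_t last_used = packed_last_used(last_used_idx) + consumed;
	bool wrap = packed_used_wrap_counter(last_used_idx);

	if (last_used >= num) {
		last_used -= num;
		wrap ^= 1;
	}

	/* ...then publish both with one 16-bit store, so a concurrent reader
	 * never sees an index/counter pair that is out of sync. */
	last_used_idx = last_used | (wrap << VRING_PACKED_EVENT_F_WRAP_CTR);

	printf("idx=%d wrap=%d\n", packed_last_used(last_used_idx),
	       packed_used_wrap_counter(last_used_idx));
	return 0;
}

In the kernel patch the final store and the initial load go through WRITE_ONCE()/READ_ONCE() so that the compiler cannot tear or re-read vq->last_used_idx.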
1 parent 0e0348a commit a772289

1 file changed

Lines changed: 47 additions & 28 deletions

File tree

drivers/virtio/virtio_ring.c

@@ -111,7 +111,12 @@ struct vring_virtqueue {
 	/* Number we've added since last sync. */
 	unsigned int num_added;
 
-	/* Last used index we've seen. */
+	/* Last used index we've seen.
+	 * for split ring, it just contains last used index
+	 * for packed ring:
+	 * bits up to VRING_PACKED_EVENT_F_WRAP_CTR include the last used index.
+	 * bits from VRING_PACKED_EVENT_F_WRAP_CTR include the used wrap counter.
+	 */
 	u16 last_used_idx;
 
 	/* Hint for event idx: already triggered no need to disable. */
@@ -154,9 +159,6 @@ struct vring_virtqueue {
 			/* Driver ring wrap counter. */
 			bool avail_wrap_counter;
 
-			/* Device ring wrap counter. */
-			bool used_wrap_counter;
-
 			/* Avail used flags. */
 			u16 avail_used_flags;
 
@@ -973,6 +975,15 @@ static struct virtqueue *vring_create_virtqueue_split(
 /*
  * Packed ring specific functions - *_packed().
  */
+static inline bool packed_used_wrap_counter(u16 last_used_idx)
+{
+	return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
+}
+
+static inline u16 packed_last_used(u16 last_used_idx)
+{
+	return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
+}
 
 static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
 				     struct vring_desc_extra *extra)
@@ -1406,16 +1417,23 @@ static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
 
 static inline bool more_used_packed(const struct vring_virtqueue *vq)
 {
-	return is_used_desc_packed(vq, vq->last_used_idx,
-			vq->packed.used_wrap_counter);
+	u16 last_used;
+	u16 last_used_idx;
+	bool used_wrap_counter;
+
+	last_used_idx = READ_ONCE(vq->last_used_idx);
+	last_used = packed_last_used(last_used_idx);
+	used_wrap_counter = packed_used_wrap_counter(last_used_idx);
+	return is_used_desc_packed(vq, last_used, used_wrap_counter);
 }
 
 static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
 					  unsigned int *len,
 					  void **ctx)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
-	u16 last_used, id;
+	u16 last_used, id, last_used_idx;
+	bool used_wrap_counter;
 	void *ret;
 
 	START_USE(vq);
@@ -1434,7 +1452,9 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
 	/* Only get used elements after they have been exposed by host. */
 	virtio_rmb(vq->weak_barriers);
 
-	last_used = vq->last_used_idx;
+	last_used_idx = READ_ONCE(vq->last_used_idx);
+	used_wrap_counter = packed_used_wrap_counter(last_used_idx);
+	last_used = packed_last_used(last_used_idx);
 	id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
 	*len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
 
@@ -1451,12 +1471,15 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
 	ret = vq->packed.desc_state[id].data;
 	detach_buf_packed(vq, id, ctx);
 
-	vq->last_used_idx += vq->packed.desc_state[id].num;
-	if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
-		vq->last_used_idx -= vq->packed.vring.num;
-		vq->packed.used_wrap_counter ^= 1;
+	last_used += vq->packed.desc_state[id].num;
+	if (unlikely(last_used >= vq->packed.vring.num)) {
+		last_used -= vq->packed.vring.num;
+		used_wrap_counter ^= 1;
 	}
 
+	last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
+	WRITE_ONCE(vq->last_used_idx, last_used);
+
 	/*
 	 * If we expect an interrupt for the next entry, tell host
 	 * by writing event index and flush out the write before
@@ -1465,9 +1488,7 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
 		virtio_store_mb(vq->weak_barriers,
 				&vq->packed.vring.driver->off_wrap,
-				cpu_to_le16(vq->last_used_idx |
-					(vq->packed.used_wrap_counter <<
-					 VRING_PACKED_EVENT_F_WRAP_CTR)));
+				cpu_to_le16(vq->last_used_idx));
 
 	LAST_ADD_TIME_INVALID(vq);
 
@@ -1499,9 +1520,7 @@ static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
 
 	if (vq->event) {
 		vq->packed.vring.driver->off_wrap =
-			cpu_to_le16(vq->last_used_idx |
-				(vq->packed.used_wrap_counter <<
-				 VRING_PACKED_EVENT_F_WRAP_CTR));
+			cpu_to_le16(vq->last_used_idx);
 		/*
 		 * We need to update event offset and event wrap
 		 * counter first before updating event flags.
@@ -1518,8 +1537,7 @@ static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
 	}
 
 	END_USE(vq);
-	return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
-			VRING_PACKED_EVENT_F_WRAP_CTR);
+	return vq->last_used_idx;
 }
 
 static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
@@ -1537,7 +1555,7 @@ static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
 static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
-	u16 used_idx, wrap_counter;
+	u16 used_idx, wrap_counter, last_used_idx;
 	u16 bufs;
 
 	START_USE(vq);
@@ -1550,9 +1568,10 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
 	if (vq->event) {
 		/* TODO: tune this threshold */
 		bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
-		wrap_counter = vq->packed.used_wrap_counter;
+		last_used_idx = READ_ONCE(vq->last_used_idx);
+		wrap_counter = packed_used_wrap_counter(last_used_idx);
 
-		used_idx = vq->last_used_idx + bufs;
+		used_idx = packed_last_used(last_used_idx) + bufs;
 		if (used_idx >= vq->packed.vring.num) {
 			used_idx -= vq->packed.vring.num;
 			wrap_counter ^= 1;
@@ -1582,9 +1601,10 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
 	 */
 	virtio_mb(vq->weak_barriers);
 
-	if (is_used_desc_packed(vq,
-				vq->last_used_idx,
-				vq->packed.used_wrap_counter)) {
+	last_used_idx = READ_ONCE(vq->last_used_idx);
+	wrap_counter = packed_used_wrap_counter(last_used_idx);
+	used_idx = packed_last_used(last_used_idx);
+	if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
 		END_USE(vq);
 		return false;
 	}
@@ -1689,7 +1709,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
 	vq->notify = notify;
 	vq->weak_barriers = weak_barriers;
 	vq->broken = true;
-	vq->last_used_idx = 0;
+	vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
 	vq->event_triggered = false;
 	vq->num_added = 0;
 	vq->packed_ring = true;
@@ -1720,7 +1740,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
 
 	vq->packed.next_avail_idx = 0;
 	vq->packed.avail_wrap_counter = 1;
-	vq->packed.used_wrap_counter = 1;
 	vq->packed.event_flags_shadow = 0;
 	vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
 