Skip to content

Commit fa56d17

Browse files
jasowang authored and mstsirkin committed
virtio_ring: factor out core logic for updating last_used_idx
Factor out the core logic for updating last_used_idx to be reused by the packed in order implementation. Acked-by: Eugenio Pérez <eperezma@redhat.com> Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com> Signed-off-by: Jason Wang <jasowang@redhat.com> Tested-by: Lei Yang <leiyang@redhat.com> Reviewed-by: Eugenio Pérez <eperezma@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Message-Id: <20251230064649.55597-17-jasowang@redhat.com>
1 parent c623106 commit fa56d17

1 file changed

Lines changed: 25 additions & 18 deletions

File tree

drivers/virtio/virtio_ring.c

Lines changed: 25 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1754,6 +1754,30 @@ static bool more_used_packed(const struct vring_virtqueue *vq)
17541754
return virtqueue_poll_packed(vq, READ_ONCE(vq->last_used_idx));
17551755
}
17561756

1757+
static void update_last_used_idx_packed(struct vring_virtqueue *vq,
1758+
u16 id, u16 last_used,
1759+
u16 used_wrap_counter)
1760+
{
1761+
last_used += vq->packed.desc_state[id].num;
1762+
if (unlikely(last_used >= vq->packed.vring.num)) {
1763+
last_used -= vq->packed.vring.num;
1764+
used_wrap_counter ^= 1;
1765+
}
1766+
1767+
last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1768+
WRITE_ONCE(vq->last_used_idx, last_used);
1769+
1770+
/*
1771+
* If we expect an interrupt for the next entry, tell host
1772+
* by writing event index and flush out the write before
1773+
* the read in the next get_buf call.
1774+
*/
1775+
if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
1776+
virtio_store_mb(vq->weak_barriers,
1777+
&vq->packed.vring.driver->off_wrap,
1778+
cpu_to_le16(vq->last_used_idx));
1779+
}
1780+
17571781
static void *virtqueue_get_buf_ctx_packed(struct vring_virtqueue *vq,
17581782
unsigned int *len,
17591783
void **ctx)
@@ -1797,24 +1821,7 @@ static void *virtqueue_get_buf_ctx_packed(struct vring_virtqueue *vq,
17971821
ret = vq->packed.desc_state[id].data;
17981822
detach_buf_packed(vq, id, ctx);
17991823

1800-
last_used += vq->packed.desc_state[id].num;
1801-
if (unlikely(last_used >= vq->packed.vring.num)) {
1802-
last_used -= vq->packed.vring.num;
1803-
used_wrap_counter ^= 1;
1804-
}
1805-
1806-
last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1807-
WRITE_ONCE(vq->last_used_idx, last_used);
1808-
1809-
/*
1810-
* If we expect an interrupt for the next entry, tell host
1811-
* by writing event index and flush out the write before
1812-
* the read in the next get_buf call.
1813-
*/
1814-
if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
1815-
virtio_store_mb(vq->weak_barriers,
1816-
&vq->packed.vring.driver->off_wrap,
1817-
cpu_to_le16(vq->last_used_idx));
1824+
update_last_used_idx_packed(vq, id, last_used, used_wrap_counter);
18181825

18191826
LAST_ADD_TIME_INVALID(vq);
18201827

0 commit comments

Comments
 (0)