Skip to content

Commit c011bb6

Browse files
mikechristie authored and mstsirkin committed
vhost: dynamically allocate vhost_worker
This patchset allows us to allocate multiple workers, so this has us move from the vhost_worker that's embedded in the vhost_dev to dynamically allocating it. Signed-off-by: Mike Christie <michael.christie@oracle.com> Message-Id: <20230626232307.97930-3-michael.christie@oracle.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
1 parent 3e11c6e commit c011bb6

2 files changed

Lines changed: 45 additions & 25 deletions

File tree

drivers/vhost/vhost.c

Lines changed: 43 additions & 23 deletions
Original file line numberDiff line numberDiff line change
/*
 * Queue a flush work item and wait for the worker to run it.  With the
 * worker now dynamically allocated, vhost_work_queue() returns false when
 * no worker is attached, in which case there is nothing to wait for.
 */
void vhost_dev_flush(struct vhost_dev *dev)
{
	struct vhost_flush_struct flush;

	init_completion(&flush.wait_event);
	vhost_work_init(&flush.work, vhost_flush_work);

	/* Only block if the work was actually handed to a worker. */
	if (vhost_work_queue(dev, &flush.work))
		wait_for_completion(&flush.wait_event);
}
EXPORT_SYMBOL_GPL(vhost_dev_flush);
247245

248-
/*
 * Queue @work on the device's worker.
 *
 * Returns true if the device has a worker (the work was queued, or was
 * already pending via VHOST_WORK_QUEUED); false if there is no worker.
 */
bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
	if (!dev->worker)
		return false;
	/*
	 * vsock can queue while we do a VHOST_SET_OWNER, so we have a smp_wmb
	 * when setting up the worker. We don't have a smp_rmb here because
	 * test_and_set_bit gives us a mb already.
	 */
	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
		/* We can only add the work to the list after we're
		 * sure it was not in the list.
		 * test_and_set_bit() implies a memory barrier.
		 */
		llist_add(&work->node, &dev->worker->work_list);
		vhost_task_wake(dev->worker->vtsk);
	}

	return true;
}
EXPORT_SYMBOL_GPL(vhost_work_queue);
263267

264268
/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
	/* NOTE(review): dereferences dev->worker without a NULL check, unlike
	 * vhost_work_queue(); presumably callers only busy-poll after the
	 * worker has been created — confirm against call sites. */
	return !llist_empty(&dev->worker->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);
270274

@@ -458,8 +462,7 @@ void vhost_dev_init(struct vhost_dev *dev,
458462
dev->umem = NULL;
459463
dev->iotlb = NULL;
460464
dev->mm = NULL;
461-
memset(&dev->worker, 0, sizeof(dev->worker));
462-
init_llist_head(&dev->worker.work_list);
465+
dev->worker = NULL;
463466
dev->iov_limit = iov_limit;
464467
dev->weight = weight;
465468
dev->byte_weight = byte_weight;
@@ -533,30 +536,47 @@ static void vhost_detach_mm(struct vhost_dev *dev)
533536

534537
/*
 * Stop the worker's vhost_task and free the dynamically allocated worker,
 * if one exists.  The work list is expected to be empty by now; leftover
 * entries indicate a bug (hence the WARN_ON).
 */
static void vhost_worker_free(struct vhost_dev *dev)
{
	if (!dev->worker)
		return;

	WARN_ON(!llist_empty(&dev->worker->work_list));
	vhost_task_stop(dev->worker->vtsk);
	kfree(dev->worker);
	/* Clear the pointer so vhost_work_queue() sees no worker. */
	dev->worker = NULL;
}
544547

545548
/*
 * Allocate a vhost_worker and start its vhost_task.
 *
 * dev->worker is published only after work_list/kcov_handle/vtsk are fully
 * initialized, separated by smp_wmb(), so a concurrent vhost_work_queue()
 * (vsock can queue during VHOST_SET_OWNER) never observes a
 * half-constructed worker.
 *
 * Returns 0 on success, -ENOMEM if the allocation or task creation fails.
 */
static int vhost_worker_create(struct vhost_dev *dev)
{
	struct vhost_worker *worker;
	struct vhost_task *vtsk;
	char name[TASK_COMM_LEN];

	/* GFP_KERNEL_ACCOUNT: charge the allocation to the owning cgroup. */
	worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
	if (!worker)
		return -ENOMEM;

	snprintf(name, sizeof(name), "vhost-%d", current->pid);

	vtsk = vhost_task_create(vhost_worker, worker, name);
	if (!vtsk)
		goto free_worker;

	init_llist_head(&worker->work_list);
	worker->kcov_handle = kcov_common_handle();
	worker->vtsk = vtsk;
	/*
	 * vsock can already try to queue so make sure llist and vtsk are both
	 * set before vhost_work_queue sees dev->worker is set.
	 */
	smp_wmb();
	dev->worker = worker;

	vhost_task_start(vtsk);
	return 0;

free_worker:
	kfree(worker);
	return -ENOMEM;
}
561581

562582
/* Caller should have device mutex */

drivers/vhost/vhost.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ struct vhost_poll {
4444
};
4545

4646
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
47-
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
47+
bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
4848
bool vhost_has_work(struct vhost_dev *dev);
4949

5050
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
@@ -158,7 +158,7 @@ struct vhost_dev {
158158
struct vhost_virtqueue **vqs;
159159
int nvqs;
160160
struct eventfd_ctx *log_ctx;
161-
struct vhost_worker worker;
161+
struct vhost_worker *worker;
162162
struct vhost_iotlb *umem;
163163
struct vhost_iotlb *iotlb;
164164
spinlock_t iotlb_lock;

0 commit comments

Comments
 (0)