@@ -235,7 +235,7 @@ void vhost_dev_flush(struct vhost_dev *dev)
235235{
236236 struct vhost_flush_struct flush ;
237237
238- if (dev -> worker ) {
238+ if (dev -> worker . vtsk ) {
239239 init_completion (& flush .wait_event );
240240 vhost_work_init (& flush .work , vhost_flush_work );
241241
@@ -247,24 +247,24 @@ EXPORT_SYMBOL_GPL(vhost_dev_flush);
247247
248248void vhost_work_queue (struct vhost_dev * dev , struct vhost_work * work )
249249{
250- if (!dev -> worker )
250+ if (!dev -> worker . vtsk )
251251 return ;
252252
253253 if (!test_and_set_bit (VHOST_WORK_QUEUED , & work -> flags )) {
254254 /* We can only add the work to the list after we're
255255 * sure it was not in the list.
256256 * test_and_set_bit() implies a memory barrier.
257257 */
258- llist_add (& work -> node , & dev -> worker -> work_list );
259- vhost_task_wake (dev -> worker -> vtsk );
258+ llist_add (& work -> node , & dev -> worker . work_list );
259+ vhost_task_wake (dev -> worker . vtsk );
260260 }
261261}
262262EXPORT_SYMBOL_GPL (vhost_work_queue );
263263
264264/* A lockless hint for busy polling code to exit the loop */
265265bool vhost_has_work (struct vhost_dev * dev )
266266{
267- return dev -> worker && !llist_empty (& dev -> worker -> work_list );
267+ return !llist_empty (& dev -> worker . work_list );
268268}
269269EXPORT_SYMBOL_GPL (vhost_has_work );
270270
@@ -341,6 +341,8 @@ static bool vhost_worker(void *data)
341341
342342 node = llist_del_all (& worker -> work_list );
343343 if (node ) {
344+ __set_current_state (TASK_RUNNING );
345+
344346 node = llist_reverse_order (node );
345347 /* make sure flag is seen after deletion */
346348 smp_wmb ();
@@ -456,7 +458,8 @@ void vhost_dev_init(struct vhost_dev *dev,
456458 dev -> umem = NULL ;
457459 dev -> iotlb = NULL ;
458460 dev -> mm = NULL ;
459- dev -> worker = NULL ;
461+ memset (& dev -> worker , 0 , sizeof (dev -> worker ));
462+ init_llist_head (& dev -> worker .work_list );
460463 dev -> iov_limit = iov_limit ;
461464 dev -> weight = weight ;
462465 dev -> byte_weight = byte_weight ;
@@ -530,47 +533,30 @@ static void vhost_detach_mm(struct vhost_dev *dev)
530533
531534static void vhost_worker_free (struct vhost_dev * dev )
532535{
533- struct vhost_worker * worker = dev -> worker ;
534-
535- if (!worker )
536+ if (!dev -> worker .vtsk )
536537 return ;
537538
538- dev -> worker = NULL ;
539- WARN_ON (! llist_empty ( & worker -> work_list ) );
540- vhost_task_stop ( worker -> vtsk ) ;
541- kfree ( worker ) ;
539+ WARN_ON (! llist_empty ( & dev -> worker . work_list )) ;
540+ vhost_task_stop ( dev -> worker . vtsk );
541+ dev -> worker . kcov_handle = 0 ;
542+ dev -> worker . vtsk = NULL ;
542543}
543544
544545static int vhost_worker_create (struct vhost_dev * dev )
545546{
546- struct vhost_worker * worker ;
547547 struct vhost_task * vtsk ;
548548 char name [TASK_COMM_LEN ];
549- int ret ;
550-
551- worker = kzalloc (sizeof (* worker ), GFP_KERNEL_ACCOUNT );
552- if (!worker )
553- return - ENOMEM ;
554549
555- dev -> worker = worker ;
556- worker -> kcov_handle = kcov_common_handle ();
557- init_llist_head (& worker -> work_list );
558550 snprintf (name , sizeof (name ), "vhost-%d" , current -> pid );
559551
560- vtsk = vhost_task_create (vhost_worker , worker , name );
561- if (!vtsk ) {
562- ret = - ENOMEM ;
563- goto free_worker ;
564- }
552+ vtsk = vhost_task_create (vhost_worker , & dev -> worker , name );
553+ if (!vtsk )
554+ return - ENOMEM ;
565555
566- worker -> vtsk = vtsk ;
556+ dev -> worker .kcov_handle = kcov_common_handle ();
557+ dev -> worker .vtsk = vtsk ;
567558 vhost_task_start (vtsk );
568559 return 0 ;
569-
570- free_worker :
571- kfree (worker );
572- dev -> worker = NULL ;
573- return ret ;
574560}
575561
576562/* Caller should have device mutex */
@@ -1614,17 +1600,25 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
16141600 r = - EFAULT ;
16151601 break ;
16161602 }
1617- if (s .num > 0xffff ) {
1618- r = - EINVAL ;
1619- break ;
1603+ if (vhost_has_feature (vq , VIRTIO_F_RING_PACKED )) {
1604+ vq -> last_avail_idx = s .num & 0xffff ;
1605+ vq -> last_used_idx = (s .num >> 16 ) & 0xffff ;
1606+ } else {
1607+ if (s .num > 0xffff ) {
1608+ r = - EINVAL ;
1609+ break ;
1610+ }
1611+ vq -> last_avail_idx = s .num ;
16201612 }
1621- vq -> last_avail_idx = s .num ;
16221613 /* Forget the cached index value. */
16231614 vq -> avail_idx = vq -> last_avail_idx ;
16241615 break ;
16251616 case VHOST_GET_VRING_BASE :
16261617 s .index = idx ;
1627- s .num = vq -> last_avail_idx ;
1618+ if (vhost_has_feature (vq , VIRTIO_F_RING_PACKED ))
1619+ s .num = (u32 )vq -> last_avail_idx | ((u32 )vq -> last_used_idx << 16 );
1620+ else
1621+ s .num = vq -> last_avail_idx ;
16281622 if (copy_to_user (argp , & s , sizeof s ))
16291623 r = - EFAULT ;
16301624 break ;
@@ -2563,12 +2557,11 @@ EXPORT_SYMBOL_GPL(vhost_disable_notify);
25632557/* Create a new message. */
25642558struct vhost_msg_node * vhost_new_msg (struct vhost_virtqueue * vq , int type )
25652559{
2566- struct vhost_msg_node * node = kmalloc (sizeof * node , GFP_KERNEL );
2560+ /* Make sure all padding within the structure is initialized. */
2561+ struct vhost_msg_node * node = kzalloc (sizeof (* node ), GFP_KERNEL );
25672562 if (!node )
25682563 return NULL ;
25692564
2570- /* Make sure all padding within the structure is initialized. */
2571- memset (& node -> msg , 0 , sizeof node -> msg );
25722565 node -> vq = vq ;
25732566 node -> msg .type = type ;
25742567 return node ;