@@ -86,6 +86,22 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
 	vgdev->vbufs = NULL;
 }

+/* For drm_panic */
+static struct virtio_gpu_vbuffer*
+virtio_gpu_panic_get_vbuf(struct virtio_gpu_device *vgdev, int size)
+{
+	struct virtio_gpu_vbuffer *vbuf;
+
+	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_ATOMIC);
+
+	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
+	vbuf->size = size;
+	vbuf->resp_cb = NULL;
+	vbuf->resp_size = sizeof(struct virtio_gpu_ctrl_hdr);
+	vbuf->resp_buf = (void *)vbuf->buf + size;
+	return vbuf;
+}
+
 static struct virtio_gpu_vbuffer*
 virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
 		    int size, int resp_size, void *resp_buf,
@@ -137,6 +153,18 @@ virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
 	return (struct virtio_gpu_update_cursor *)vbuf->buf;
 }

+/* For drm_panic */
+static void *virtio_gpu_panic_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
+					     struct virtio_gpu_vbuffer **vbuffer_p,
+					     int cmd_size)
+{
+	struct virtio_gpu_vbuffer *vbuf;
+
+	vbuf = virtio_gpu_panic_get_vbuf(vgdev, cmd_size);
+	*vbuffer_p = vbuf;
+	return (struct virtio_gpu_command *)vbuf->buf;
+}
+
 static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
 					virtio_gpu_resp_cb cb,
 					struct virtio_gpu_vbuffer **vbuffer_p,
@@ -311,6 +339,34 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
 	return sgt;
 }

+/* For drm_panic */
+static int virtio_gpu_panic_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+					   struct virtio_gpu_vbuffer *vbuf,
+					   int elemcnt,
+					   struct scatterlist **sgs,
+					   int outcnt,
+					   int incnt)
+{
+	struct virtqueue *vq = vgdev->ctrlq.vq;
+	int ret;
+
+	if (vgdev->has_indirect)
+		elemcnt = 1;
+
+	if (vq->num_free < elemcnt)
+		return -ENOMEM;
+
+	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
+	WARN_ON(ret);
+
+	vbuf->seqno = ++vgdev->ctrlq.seqno;
+	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);
+
+	atomic_inc(&vgdev->pending_commands);
+
+	return 0;
+}
+
 static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
 				     struct virtio_gpu_vbuffer *vbuf,
 				     struct virtio_gpu_fence *fence,
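The decisive difference from the regular virtio_gpu_queue_ctrl_sgs() is what happens when the control virtqueue is full: the regular path can drop its lock and sleep until the host completes earlier commands, while the panic path runs in a context where sleeping and lock-taking are forbidden, so it fails fast with -ENOMEM. A simplified contrast, paraphrasing rather than quoting the driver's existing full-queue handling:

	/* Regular path (paraphrased): may sleep until the host drains the queue. */
	while (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		virtio_gpu_notify(vgdev);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		spin_lock(&vgdev->ctrlq.qlock);
	}

	/* Panic path: no locks, no sleeping; give up instead. */
	if (vq->num_free < elemcnt)
		return -ENOMEM;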
@@ -368,6 +424,32 @@ static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
 	return 0;
 }

+/* For drm_panic */
+static int virtio_gpu_panic_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+					      struct virtio_gpu_vbuffer *vbuf)
+{
+	struct scatterlist *sgs[3], vcmd, vresp;
+	int elemcnt = 0, outcnt = 0, incnt = 0;
+
+	/* set up vcmd */
+	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+	elemcnt++;
+	sgs[outcnt] = &vcmd;
+	outcnt++;
+
+	/* set up vresp */
+	if (vbuf->resp_size) {
+		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+		elemcnt++;
+		sgs[outcnt + incnt] = &vresp;
+		incnt++;
+	}
+
+	return virtio_gpu_panic_queue_ctrl_sgs(vgdev, vbuf,
+					       elemcnt, sgs,
+					       outcnt, incnt);
+}
+
 static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 					       struct virtio_gpu_vbuffer *vbuf,
 					       struct virtio_gpu_fence *fence)
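virtqueue_add_sgs() requires every driver-to-device ("out") entry in sgs[] to precede every device-to-driver ("in") entry, which is why the response descriptor lands at sgs[outcnt + incnt]. A minimal standalone illustration of that contract (buffer and cookie names are illustrative):

	struct scatterlist *sgs[2], out_sg, in_sg;

	sg_init_one(&out_sg, cmd_buf, cmd_len);		/* driver -> device */
	sgs[0] = &out_sg;
	sg_init_one(&in_sg, resp_buf, resp_len);	/* device -> driver */
	sgs[1] = &in_sg;

	/* out entries first (one here), then in entries (one here) */
	ret = virtqueue_add_sgs(vq, sgs, 1, 1, cookie, GFP_ATOMIC);

The panic variant appears to keep the sgs[3] sizing of the regular helper even though it never queues the optional data scatterlist, so at most two of the three slots are ever used.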
@@ -422,6 +504,21 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 	return ret;
 }

+/* For drm_panic */
+void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev)
+{
+	bool notify;
+
+	if (!atomic_read(&vgdev->pending_commands))
+		return;
+
+	atomic_set(&vgdev->pending_commands, 0);
+	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
+
+	if (notify)
+		virtqueue_notify(vgdev->ctrlq.vq);
+}
+
 void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
 {
 	bool notify;
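Like the queueing helpers, the panic notify skips the ctrlq spinlock its regular counterpart takes around kick preparation; at panic time only one CPU is running and the interrupted context may already hold that lock. The kick itself is the virtio core's standard split API, where virtqueue_kick() is shorthand for the prepare/notify pair:

	/* virtqueue_kick(vq) is equivalent to this two-step sequence: */
	if (virtqueue_kick_prepare(vq))	/* new buffers the host must see? */
		virtqueue_notify(vq);	/* ring the doorbell, ideally outside locks */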
@@ -567,6 +664,29 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }

+/* For drm_panic */
+void virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+					 uint32_t resource_id,
+					 uint32_t x, uint32_t y,
+					 uint32_t width, uint32_t height)
+{
+	struct virtio_gpu_resource_flush *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = NULL;
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->r.width = cpu_to_le32(width);
+	cmd_p->r.height = cpu_to_le32(height);
+	cmd_p->r.x = cpu_to_le32(x);
+	cmd_p->r.y = cpu_to_le32(y);
+
+	virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
+}
+
 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 				   uint32_t resource_id,
 				   uint32_t x, uint32_t y,
@@ -591,6 +711,37 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }

+/* For drm_panic */
+int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+					     uint64_t offset,
+					     uint32_t width, uint32_t height,
+					     uint32_t x, uint32_t y,
+					     struct virtio_gpu_object_array *objs)
+{
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+	struct virtio_gpu_transfer_to_host_2d *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+
+	if (virtio_gpu_is_shmem(bo) && use_dma_api)
+		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+					    bo->base.sgt, DMA_TO_DEVICE);
+
+	cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = objs;
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+	cmd_p->offset = cpu_to_le64(offset);
+	cmd_p->r.width = cpu_to_le32(width);
+	cmd_p->r.height = cpu_to_le32(height);
+	cmd_p->r.x = cpu_to_le32(x);
+	cmd_p->r.y = cpu_to_le32(y);
+
+	return virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
+}
+
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 					uint64_t offset,
 					uint32_t width, uint32_t height,
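Taken together, the new helpers give drm_panic a no-lock, no-sleep path from a damaged framebuffer to the host: transfer the pixels, flush the resource, then kick the virtqueue exactly once. A hedged sketch of how a plane's panic flush callback might chain them; the callback name, the full-frame damage rectangle, and the object-array setup are illustrative assumptions, not code from this diff:

/* Hypothetical caller, for illustration only. */
static void virtio_gpu_panic_flush(struct drm_plane *plane)
{
	struct virtio_gpu_device *vgdev = plane->dev->dev_private;
	struct drm_framebuffer *fb = plane->state->fb;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(fb->obj[0]);
	struct virtio_gpu_object_array *objs;

	/* a real panic path would need a preallocated or atomic object
	 * array; plain virtio_gpu_array_alloc() is shown for brevity */
	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, fb->obj[0]);

	/* upload the whole framebuffer ... */
	if (virtio_gpu_panic_cmd_transfer_to_host_2d(vgdev, 0,
						     fb->width, fb->height,
						     0, 0, objs))
		return;

	/* ... ask the host to present it ... */
	virtio_gpu_panic_cmd_resource_flush(vgdev, bo->hw_res_handle,
					    0, 0, fb->width, fb->height);

	/* ... and ring the doorbell once, outside any lock */
	virtio_gpu_panic_notify(vgdev);
}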