@@ -100,35 +100,35 @@ static void pds_vfio_dirty_free_bitmaps(struct pds_vfio_dirty *dirty)
100100}
101101
102102static void __pds_vfio_dirty_free_sgl(struct pds_vfio_pci_device *pds_vfio,
103- struct pds_vfio_dirty *dirty)
103+ struct pds_vfio_region *region)
104104{
105105 struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
106106 struct device *pdsc_dev = &pci_physfn(pdev)->dev;
107107
108- dma_unmap_single(pdsc_dev, dirty->region.sgl_addr,
109- dirty->region.num_sge * sizeof(struct pds_lm_sg_elem),
108+ dma_unmap_single(pdsc_dev, region->sgl_addr,
109+ region->num_sge * sizeof(struct pds_lm_sg_elem),
110110 DMA_BIDIRECTIONAL);
111- kfree(dirty->region.sgl);
111+ kfree(region->sgl);
112112
113- dirty->region.num_sge = 0;
114- dirty->region.sgl = NULL;
115- dirty->region.sgl_addr = 0;
113+ region->num_sge = 0;
114+ region->sgl = NULL;
115+ region->sgl_addr = 0;
116116}
117117
118118static void pds_vfio_dirty_free_sgl(struct pds_vfio_pci_device *pds_vfio)
119119{
120- struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
120+ struct pds_vfio_region *region = &pds_vfio->dirty.region;
121121
122- if (dirty->region.sgl)
123- __pds_vfio_dirty_free_sgl(pds_vfio, dirty);
122+ if (region->sgl)
123+ __pds_vfio_dirty_free_sgl(pds_vfio, region);
124124}
125125
126126static int pds_vfio_dirty_alloc_sgl(struct pds_vfio_pci_device *pds_vfio,
127+ struct pds_vfio_region *region,
127128 u32 page_count)
128129{
129130 struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
130131 struct device *pdsc_dev = &pci_physfn(pdev)->dev;
131- struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
132132 struct pds_lm_sg_elem *sgl;
133133 dma_addr_t sgl_addr;
134134 size_t sgl_size;
@@ -147,9 +147,9 @@ static int pds_vfio_dirty_alloc_sgl(struct pds_vfio_pci_device *pds_vfio,
147147 return -EIO;
148148 }
149149
150- dirty->region.sgl = sgl;
151- dirty->region.num_sge = max_sge;
152- dirty->region.sgl_addr = sgl_addr;
150+ region->sgl = sgl;
151+ region->num_sge = max_sge;
152+ region->sgl_addr = sgl_addr;
153153
154154 return 0 ;
155155}
@@ -260,7 +260,7 @@ static int pds_vfio_dirty_enable(struct pds_vfio_pci_device *pds_vfio,
260260 goto out_free_region_info ;
261261 }
262262
263- err = pds_vfio_dirty_alloc_sgl(pds_vfio, page_count);
263+ err = pds_vfio_dirty_alloc_sgl(pds_vfio, &dirty->region, page_count);
264264 if (err ) {
265265 dev_err (& pdev -> dev , "Failed to alloc dirty sg lists: %pe\n" ,
266266 ERR_PTR (err ));
@@ -300,11 +300,11 @@ void pds_vfio_dirty_disable(struct pds_vfio_pci_device *pds_vfio, bool send_cmd)
300300}
301301
302302static int pds_vfio_dirty_seq_ack(struct pds_vfio_pci_device *pds_vfio,
303+ struct pds_vfio_region *region,
303304 struct pds_vfio_bmp_info *bmp_info,
304305 u32 offset, u32 bmp_bytes, bool read_seq)
305306{
306307 const char *bmp_type_str = read_seq ? "read_seq" : "write_ack";
307- struct pds_vfio_region *region = &pds_vfio->dirty.region;
308308 u8 dma_dir = read_seq ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
309309 struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
310310 struct device *pdsc_dev = &pci_physfn(pdev)->dev;
@@ -383,36 +383,36 @@ static int pds_vfio_dirty_seq_ack(struct pds_vfio_pci_device *pds_vfio,
383383}
384384
385385static int pds_vfio_dirty_write_ack(struct pds_vfio_pci_device *pds_vfio,
386+ struct pds_vfio_region *region,
386387 u32 offset, u32 len)
387388{
388- struct pds_vfio_region *region = &pds_vfio->dirty.region;
389389
390- return pds_vfio_dirty_seq_ack(pds_vfio, &region->host_ack,
390+ return pds_vfio_dirty_seq_ack(pds_vfio, region, &region->host_ack,
391391 offset, len, WRITE_ACK);
392392}
393393
394394static int pds_vfio_dirty_read_seq(struct pds_vfio_pci_device *pds_vfio,
395+ struct pds_vfio_region *region,
395396 u32 offset, u32 len)
396397{
397- struct pds_vfio_region *region = &pds_vfio->dirty.region;
398-
399- return pds_vfio_dirty_seq_ack(pds_vfio, &region->host_seq,
398+ return pds_vfio_dirty_seq_ack(pds_vfio, region, &region->host_seq,
400399 offset, len, READ_SEQ);
401400}
402401
403402static int pds_vfio_dirty_process_bitmaps(struct pds_vfio_pci_device *pds_vfio,
403+ struct pds_vfio_region *region,
404404 struct iova_bitmap *dirty_bitmap,
405405 u32 bmp_offset, u32 len_bytes)
406406{
407- u64 page_size = pds_vfio->dirty.region.page_size;
408- u64 region_start = pds_vfio->dirty.region.start;
407+ u64 page_size = region->page_size;
408+ u64 region_start = region->start;
409409 u32 bmp_offset_bit;
410410 __le64 *seq, *ack;
411411 int dword_count;
412412
413413 dword_count = len_bytes / sizeof(u64);
414- seq = (__le64 *)((u64)pds_vfio->dirty.region.host_seq.bmp + bmp_offset);
415- ack = (__le64 *)((u64)pds_vfio->dirty.region.host_ack.bmp + bmp_offset);
414+ seq = (__le64 *)((u64)region->host_seq.bmp + bmp_offset);
415+ ack = (__le64 *)((u64)region->host_ack.bmp + bmp_offset);
416416 bmp_offset_bit = bmp_offset * 8;
418418 for (int i = 0 ; i < dword_count ; i ++ ) {
@@ -441,6 +441,7 @@ static int pds_vfio_dirty_sync(struct pds_vfio_pci_device *pds_vfio,
441441{
442442 struct device * dev = & pds_vfio -> vfio_coredev .pdev -> dev ;
443443 struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
444+ struct pds_vfio_region *region = &dirty->region;
444445 u64 bmp_offset , bmp_bytes ;
445446 u64 bitmap_size , pages ;
446447 int err ;
@@ -453,23 +454,23 @@ static int pds_vfio_dirty_sync(struct pds_vfio_pci_device *pds_vfio,
453454 return - EINVAL ;
454455 }
455456
456- pages = DIV_ROUND_UP(length, pds_vfio->dirty.region.page_size);
457+ pages = DIV_ROUND_UP(length, region->page_size);
457458 bitmap_size =
458459 round_up(pages, sizeof(u64) * BITS_PER_BYTE) / BITS_PER_BYTE;
459460
460461 dev_dbg(dev,
461462 "vf%u: iova 0x%lx length %lu page_size %llu pages %llu bitmap_size %llu\n",
462- pds_vfio->vf_id, iova, length, pds_vfio->dirty.region.page_size,
463+ pds_vfio->vf_id, iova, length, region->page_size,
463464 pages, bitmap_size);
464465
465- if (!length || ((iova - dirty->region.start + length) > dirty->region.size)) {
466+ if (!length || ((iova - region->start + length) > region->size)) {
466467 dev_err(dev, "Invalid iova 0x%lx and/or length 0x%lx to sync\n",
467468 iova, length);
468469 return -EINVAL;
469470 }
470471
471472 /* bitmap is modified in 64 bit chunks */
472- bmp_bytes = ALIGN(DIV_ROUND_UP(length / dirty->region.page_size,
473+ bmp_bytes = ALIGN(DIV_ROUND_UP(length / region->page_size,
473474 sizeof(u64)), sizeof(u64));
474475 if (bmp_bytes != bitmap_size) {
475476 dev_err(dev,
@@ -478,23 +479,23 @@ static int pds_vfio_dirty_sync(struct pds_vfio_pci_device *pds_vfio,
478479 return - EINVAL ;
479480 }
480481
481- bmp_offset = DIV_ROUND_UP((iova - dirty->region.start) /
482- dirty->region.page_size, sizeof(u64));
481+ bmp_offset = DIV_ROUND_UP((iova - region->start) /
482+ region->page_size, sizeof(u64));
483484
484485 dev_dbg (dev ,
485486 "Syncing dirty bitmap, iova 0x%lx length 0x%lx, bmp_offset %llu bmp_bytes %llu\n" ,
486487 iova , length , bmp_offset , bmp_bytes );
487488
488- err = pds_vfio_dirty_read_seq(pds_vfio, bmp_offset, bmp_bytes);
489+ err = pds_vfio_dirty_read_seq(pds_vfio, region, bmp_offset, bmp_bytes);
489490 if (err)
490491 return err;
491492
492- err = pds_vfio_dirty_process_bitmaps(pds_vfio, dirty_bitmap, bmp_offset,
493- bmp_bytes);
493+ err = pds_vfio_dirty_process_bitmaps(pds_vfio, region, dirty_bitmap,
494+ bmp_offset, bmp_bytes);
494495 if (err)
495496 return err;
496497
497- err = pds_vfio_dirty_write_ack(pds_vfio, bmp_offset, bmp_bytes);
498+ err = pds_vfio_dirty_write_ack(pds_vfio, region, bmp_offset, bmp_bytes);
498499 if (err)
499500 return err;
500501
0 commit comments