@@ -45,15 +45,18 @@ static inline struct io_zcrx_area *io_zcrx_iov_to_area(const struct net_iov *nio
 static inline struct page *io_zcrx_iov_page(const struct net_iov *niov)
 {
 	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
+	unsigned niov_pages_shift;
 
 	lockdep_assert(!area->mem.is_dmabuf);
 
-	return area->mem.pages[net_iov_idx(niov)];
+	niov_pages_shift = area->ifq->niov_shift - PAGE_SHIFT;
+	return area->mem.pages[net_iov_idx(niov) << niov_pages_shift];
 }
 
 static int io_populate_area_dma(struct io_zcrx_ifq *ifq,
 				struct io_zcrx_area *area)
 {
+	unsigned niov_size = 1U << ifq->niov_shift;
 	struct sg_table *sgt = area->mem.sgt;
 	struct scatterlist *sg;
 	unsigned i, niov_idx = 0;
@@ -62,13 +65,16 @@ static int io_populate_area_dma(struct io_zcrx_ifq *ifq,
 		dma_addr_t dma = sg_dma_address(sg);
 		unsigned long sg_len = sg_dma_len(sg);
 
+		if (WARN_ON_ONCE(sg_len % niov_size))
+			return -EINVAL;
+
 		while (sg_len && niov_idx < area->nia.num_niovs) {
 			struct net_iov *niov = &area->nia.niovs[niov_idx];
 
 			if (net_mp_niov_set_dma_addr(niov, dma))
 				return -EFAULT;
-			sg_len -= PAGE_SIZE;
-			dma += PAGE_SIZE;
+			sg_len -= niov_size;
+			dma += niov_size;
 			niov_idx++;
 		}
 	}
@@ -284,18 +290,21 @@ static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
 	return ret;
 }
 
-static void io_zcrx_sync_for_device(const struct page_pool *pool,
+static void io_zcrx_sync_for_device(struct page_pool *pool,
 				    struct net_iov *niov)
 {
 #if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
 	dma_addr_t dma_addr;
 
+	unsigned niov_size;
+
 	if (!dma_dev_need_sync(pool->p.dev))
 		return;
 
+	niov_size = 1U << io_pp_to_ifq(pool)->niov_shift;
 	dma_addr = page_pool_get_dma_addr_netmem(net_iov_to_netmem(niov));
 	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
-				     PAGE_SIZE, pool->p.dma_dir);
+				     niov_size, pool->p.dma_dir);
 #endif
 }
 
@@ -413,7 +422,8 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
 	if (ret)
 		goto err;
 
-	nr_iovs = area->mem.size >> PAGE_SHIFT;
+	ifq->niov_shift = PAGE_SHIFT;
+	nr_iovs = area->mem.size >> ifq->niov_shift;
 	area->nia.num_niovs = nr_iovs;
 
 	ret = -ENOMEM;
@@ -764,7 +774,7 @@ static void io_zcrx_ring_refill(struct page_pool *pp,
 		unsigned niov_idx, area_idx;
 
 		area_idx = rqe->off >> IORING_ZCRX_AREA_SHIFT;
-		niov_idx = (rqe->off & ~IORING_ZCRX_AREA_MASK) >> PAGE_SHIFT;
+		niov_idx = (rqe->off & ~IORING_ZCRX_AREA_MASK) >> ifq->niov_shift;
 
 		if (unlikely(rqe->__pad || area_idx))
 			continue;
@@ -854,8 +864,8 @@ static int io_pp_zc_init(struct page_pool *pp)
 		return -EINVAL;
 	if (WARN_ON_ONCE(!pp->dma_map))
 		return -EOPNOTSUPP;
-	if (pp->p.order != 0)
-		return -EOPNOTSUPP;
+	if (pp->p.order + PAGE_SHIFT != ifq->niov_shift)
+		return -EINVAL;
 	if (pp->p.dma_dir != DMA_FROM_DEVICE)
 		return -EOPNOTSUPP;
 
@@ -930,7 +940,7 @@ static bool io_zcrx_queue_cqe(struct io_kiocb *req, struct net_iov *niov,
 		cqe->flags |= IORING_CQE_F_32;
 
 	area = io_zcrx_iov_to_area(niov);
-	offset = off + (net_iov_idx(niov) << PAGE_SHIFT);
+	offset = off + (net_iov_idx(niov) << ifq->niov_shift);
 	rcqe = (struct io_uring_zcrx_cqe *)(cqe + 1);
 	rcqe->off = offset + ((u64)area->area_id << IORING_ZCRX_AREA_SHIFT);
 	rcqe->__pad = 0;
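
For context, a small standalone sketch of the offset arithmetic this patch generalizes: a zcrx offset carries the area index in its top bits, and the remaining bits now convert to a niov index via ifq->niov_shift instead of a fixed PAGE_SHIFT, mirroring io_zcrx_ring_refill() and io_zcrx_queue_cqe() above. The helper names and the locally defined AREA_SHIFT/AREA_MASK constants (intended to mirror the io_uring uapi definitions) are assumptions for illustration only, not part of the patch.

```c
#include <stdint.h>
#include <stdio.h>

#define AREA_SHIFT	48					/* mirrors IORING_ZCRX_AREA_SHIFT */
#define AREA_MASK	(~(((uint64_t)1 << AREA_SHIFT) - 1))	/* mirrors IORING_ZCRX_AREA_MASK */

/* Split an rqe->off style offset into its area index and niov index. */
static void zcrx_off_decode(uint64_t off, unsigned niov_shift,
			    unsigned *area_idx, unsigned *niov_idx)
{
	*area_idx = (unsigned)(off >> AREA_SHIFT);
	*niov_idx = (unsigned)((off & ~AREA_MASK) >> niov_shift);
}

/* Rebuild a completion-side offset from an area id, niov index and intra-niov offset. */
static uint64_t zcrx_off_encode(unsigned area_id, unsigned niov_idx,
				unsigned niov_shift, unsigned off_in_niov)
{
	uint64_t offset = off_in_niov + ((uint64_t)niov_idx << niov_shift);

	return offset + ((uint64_t)area_id << AREA_SHIFT);
}

int main(void)
{
	unsigned niov_shift = 12;	/* PAGE_SHIFT today; the patch makes it a per-ifq value */
	unsigned area_idx, niov_idx;
	uint64_t off = zcrx_off_encode(0, 3, niov_shift, 0x40);

	zcrx_off_decode(off, niov_shift, &area_idx, &niov_idx);
	printf("area %u, niov %u\n", area_idx, niov_idx);
	return 0;
}
```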