 
 #define MSHV_MAP_FAULT_IN_PAGES PTRS_PER_PMD
 
+/**
+ * mshv_chunk_stride - Compute stride for mapping guest memory
+ * @page:       The page to check for huge page backing
+ * @gfn:        Guest frame number for the mapping
+ * @page_count: Total number of pages in the mapping
+ *
+ * Determines the appropriate stride (in pages) for mapping guest memory.
+ * Uses huge page stride if the backing page is huge and the guest mapping
+ * is properly aligned; otherwise falls back to single page stride.
+ *
+ * Return: Stride in pages, or -EINVAL if page order is unsupported.
+ */
+static int mshv_chunk_stride(struct page *page,
+                             u64 gfn, u64 page_count)
+{
+        unsigned int page_order;
+
+        /*
+         * Use single page stride by default. For huge page stride, the
+         * page must be compound and point to the head of the compound
+         * page, and both gfn and page_count must be huge-page aligned.
+         */
+        if (!PageCompound(page) || !PageHead(page) ||
+            !IS_ALIGNED(gfn, PTRS_PER_PMD) ||
+            !IS_ALIGNED(page_count, PTRS_PER_PMD))
+                return 1;
+
+        page_order = folio_order(page_folio(page));
+        /* The hypervisor only supports 2M huge pages */
+        if (page_order != PMD_ORDER)
+                return -EINVAL;
+
+        return 1 << page_order;
+}
+
 /**
  * mshv_region_process_chunk - Processes a contiguous chunk of memory pages
  *                             in a region.
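
For reference, here is a minimal userspace sketch of the stride decision the new helper makes. It hard-codes the x86-64 constants (4K base pages, 2M PMD) and uses a boolean in place of the real PageCompound()/PageHead() checks on struct page; `stride_model` and all test values are hypothetical illustrations, not part of the patch:

```c
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTRS_PER_PMD 512                 /* 2M / 4K */
#define PMD_ORDER    9                   /* log2(PTRS_PER_PMD) */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

static int stride_model(bool compound_head, unsigned int folio_order,
                        uint64_t gfn, uint64_t page_count)
{
        /* Any misalignment forces the safe single-page stride. */
        if (!compound_head || !IS_ALIGNED(gfn, PTRS_PER_PMD) ||
            !IS_ALIGNED(page_count, PTRS_PER_PMD))
                return 1;

        /* Only a 2M folio maps to a hypervisor large page. */
        if (folio_order != PMD_ORDER)
                return -EINVAL;

        return 1 << folio_order;         /* 512 pages == 2M */
}

int main(void)
{
        printf("%d\n", stride_model(true, 9, 0x200, 1024));  /* 512 */
        printf("%d\n", stride_model(true, 9, 0x201, 1024));  /* 1: gfn unaligned */
        printf("%d\n", stride_model(false, 0, 0x200, 1024)); /* 1: not a compound head */
        printf("%d\n", stride_model(true, 18, 0x0, 262144)); /* -EINVAL: 1G folio */
        return 0;
}
```
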
@@ -45,38 +80,37 @@ static long mshv_region_process_chunk(struct mshv_mem_region *region,
                               int (*handler)(struct mshv_mem_region *region,
                                              u32 flags,
                                              u64 page_offset,
-                                             u64 page_count))
+                                             u64 page_count,
+                                             bool huge_page))
 {
-        u64 count, stride;
-        unsigned int page_order;
+        u64 gfn = region->start_gfn + page_offset;
+        u64 count;
         struct page *page;
-        int ret;
+        int stride, ret;
 
         page = region->pages[page_offset];
         if (!page)
                 return -EINVAL;
 
-        page_order = folio_order(page_folio(page));
-        /* The hypervisor only supports 4K and 2M page sizes */
-        if (page_order && page_order != PMD_ORDER)
-                return -EINVAL;
+        stride = mshv_chunk_stride(page, gfn, page_count);
+        if (stride < 0)
+                return stride;
 
-        stride = 1 << page_order;
-
-        /* Start at stride since the first page is validated */
+        /* Start at stride since the first stride is validated */
         for (count = stride; count < page_count; count += stride) {
                 page = region->pages[page_offset + count];
 
                 /* Break if current page is not present */
                 if (!page)
                         break;
 
-                /* Break if page size changes */
-                if (page_order != folio_order(page_folio(page)))
+                /* Break if stride size changes */
+                if (stride != mshv_chunk_stride(page, gfn + count,
+                                                page_count - count))
                         break;
         }
 
-        ret = handler(region, flags, page_offset, count);
+        ret = handler(region, flags, page_offset, count, stride > 1);
         if (ret)
                 return ret;
 
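
The loop above grows a chunk for as long as pages are present and the stride stays constant. Below is a toy userspace model of that walk, where `strides[]` stands in for the region's page array: each entry holds the stride the helper would report for that slot, with 0 meaning the page is absent. (The real code re-derives the stride from the remaining gfn and count on every step; this sketch folds that into the array, and all names and values are hypothetical.)

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t chunk_len(const int *strides, uint64_t page_offset,
                          uint64_t page_count)
{
        int stride = strides[page_offset];
        uint64_t count;

        /* Start at stride since the first slot is assumed validated. */
        for (count = stride; count < page_count; count += stride) {
                int next = strides[page_offset + count];

                /* Stop at a hole or when the stride changes. */
                if (!next || next != stride)
                        break;
        }
        return count;
}

int main(void)
{
        /* Two 2M runs (stride 512) followed by a 4K page: the chunk
         * covers both huge pages and stops at index 1024. */
        static int strides[1025];

        strides[0] = 512;
        strides[512] = 512;
        strides[1024] = 1;
        printf("%llu\n", (unsigned long long)chunk_len(strides, 0, 1025));
        return 0;
}
```
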
@@ -108,7 +142,8 @@ static int mshv_region_process_range(struct mshv_mem_region *region,
                                     int (*handler)(struct mshv_mem_region *region,
                                                    u32 flags,
                                                    u64 page_offset,
-                                                   u64 page_count))
+                                                   u64 page_count,
+                                                   bool huge_page))
 {
         long ret;
 
@@ -162,11 +197,10 @@ struct mshv_mem_region *mshv_region_create(u64 guest_pfn, u64 nr_pages,
 
 static int mshv_region_chunk_share(struct mshv_mem_region *region,
                                    u32 flags,
-                                   u64 page_offset, u64 page_count)
+                                   u64 page_offset, u64 page_count,
+                                   bool huge_page)
 {
-        struct page *page = region->pages[page_offset];
-
-        if (PageHuge(page) || PageTransCompound(page))
+        if (huge_page)
                 flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
 
         return hv_call_modify_spa_host_access(region->partition->pt_id,
@@ -188,11 +222,10 @@ int mshv_region_share(struct mshv_mem_region *region)
 
 static int mshv_region_chunk_unshare(struct mshv_mem_region *region,
                                      u32 flags,
-                                     u64 page_offset, u64 page_count)
+                                     u64 page_offset, u64 page_count,
+                                     bool huge_page)
 {
-        struct page *page = region->pages[page_offset];
-
-        if (PageHuge(page) || PageTransCompound(page))
+        if (huge_page)
                 flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
 
         return hv_call_modify_spa_host_access(region->partition->pt_id,
@@ -212,11 +245,10 @@ int mshv_region_unshare(struct mshv_mem_region *region)
 
 static int mshv_region_chunk_remap(struct mshv_mem_region *region,
                                    u32 flags,
-                                   u64 page_offset, u64 page_count)
+                                   u64 page_offset, u64 page_count,
+                                   bool huge_page)
 {
-        struct page *page = region->pages[page_offset];
-
-        if (PageHuge(page) || PageTransCompound(page))
+        if (huge_page)
                 flags |= HV_MAP_GPA_LARGE_PAGE;
 
         return hv_call_map_gpa_pages(region->partition->pt_id,
@@ -295,11 +327,10 @@ int mshv_region_pin(struct mshv_mem_region *region)
 
 static int mshv_region_chunk_unmap(struct mshv_mem_region *region,
                                    u32 flags,
-                                   u64 page_offset, u64 page_count)
+                                   u64 page_offset, u64 page_count,
+                                   bool huge_page)
 {
-        struct page *page = region->pages[page_offset];
-
-        if (PageHuge(page) || PageTransCompound(page))
+        if (huge_page)
                 flags |= HV_UNMAP_GPA_LARGE_PAGE;
 
         return hv_call_unmap_gpa_pages(region->partition->pt_id,
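
All four chunk handlers (share, unshare, remap, unmap) now reduce to the same shape: the walker decides huge-vs-4K once per chunk via the stride, and each handler only translates the `huge_page` bool into its hypercall's LARGE_PAGE flag, instead of re-inspecting struct page with PageHuge()/PageTransCompound(). A trivial model of that translation, where the flag value is a made-up stand-in for the HV_*_LARGE_PAGE constants:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LARGE_PAGE_FLAG (1u << 2)   /* hypothetical stand-in */

/* Every chunk handler applies its LARGE_PAGE bit the same way. */
static uint32_t handler_flags(uint32_t flags, bool huge_page)
{
        if (huge_page)
                flags |= LARGE_PAGE_FLAG;
        return flags;
}

int main(void)
{
        printf("0x%x\n", handler_flags(0x1, true));  /* 0x5 */
        printf("0x%x\n", handler_flags(0x1, false)); /* 0x1 */
        return 0;
}
```
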