Skip to content

Commit e950c30

Browse files
Stanislav Kinsburskii authored and Wei Liu (liuw) committed
mshv: Move region management to mshv_regions.c
Refactor memory region management functions from mshv_root_main.c into mshv_regions.c for better modularity and code organization. Adjust function calls and headers to use the new implementation. Improve maintainability and separation of concerns in the mshv_root module. Signed-off-by: Stanislav Kinsburskii <skinsburskii@linux.microsoft.com> Reviewed-by: Anirudh Rayabharam (Microsoft) <anirudh@anirudhrb.com> Reviewed-by: Nuno Das Neves <nunodasneves@linux.microsoft.com> Signed-off-by: Wei Liu <wei.liu@kernel.org>
1 parent 6f6aed2 commit e950c30

4 files changed

Lines changed: 198 additions & 165 deletions

File tree

drivers/hv/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ hv_vmbus-y := vmbus_drv.o \
1414
hv_vmbus-$(CONFIG_HYPERV_TESTING) += hv_debugfs.o
1515
hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_utils_transport.o
1616
mshv_root-y := mshv_root_main.o mshv_synic.o mshv_eventfd.o mshv_irq.o \
	       mshv_root_hv_call.o mshv_portid_table.o mshv_regions.o
1818
mshv_vtl-y := mshv_vtl_main.o
1919

2020
# Code that must be built-in

drivers/hv/mshv_regions.c

Lines changed: 175 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,175 @@
1+
// SPDX-License-Identifier: GPL-2.0-only
2+
/*
3+
* Copyright (c) 2025, Microsoft Corporation.
4+
*
5+
* Memory region management for mshv_root module.
6+
*
7+
* Authors: Microsoft Linux virtualization team
8+
*/
9+
10+
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/vmalloc.h>

#include <asm/mshyperv.h>

#include "mshv_root.h"
16+
17+
struct mshv_mem_region *mshv_region_create(u64 guest_pfn, u64 nr_pages,
18+
u64 uaddr, u32 flags,
19+
bool is_mmio)
20+
{
21+
struct mshv_mem_region *region;
22+
23+
region = vzalloc(sizeof(*region) + sizeof(struct page *) * nr_pages);
24+
if (!region)
25+
return ERR_PTR(-ENOMEM);
26+
27+
region->nr_pages = nr_pages;
28+
region->start_gfn = guest_pfn;
29+
region->start_uaddr = uaddr;
30+
region->hv_map_flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_ADJUSTABLE;
31+
if (flags & BIT(MSHV_SET_MEM_BIT_WRITABLE))
32+
region->hv_map_flags |= HV_MAP_GPA_WRITABLE;
33+
if (flags & BIT(MSHV_SET_MEM_BIT_EXECUTABLE))
34+
region->hv_map_flags |= HV_MAP_GPA_EXECUTABLE;
35+
36+
/* Note: large_pages flag populated when we pin the pages */
37+
if (!is_mmio)
38+
region->flags.range_pinned = true;
39+
40+
return region;
41+
}
42+
43+
int mshv_region_share(struct mshv_mem_region *region)
44+
{
45+
u32 flags = HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_SHARED;
46+
47+
if (region->flags.large_pages)
48+
flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
49+
50+
return hv_call_modify_spa_host_access(region->partition->pt_id,
51+
region->pages, region->nr_pages,
52+
HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE,
53+
flags, true);
54+
}
55+
56+
int mshv_region_unshare(struct mshv_mem_region *region)
57+
{
58+
u32 flags = HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_EXCLUSIVE;
59+
60+
if (region->flags.large_pages)
61+
flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
62+
63+
return hv_call_modify_spa_host_access(region->partition->pt_id,
64+
region->pages, region->nr_pages,
65+
0,
66+
flags, false);
67+
}
68+
69+
static int mshv_region_remap_pages(struct mshv_mem_region *region,
70+
u32 map_flags,
71+
u64 page_offset, u64 page_count)
72+
{
73+
if (page_offset + page_count > region->nr_pages)
74+
return -EINVAL;
75+
76+
if (region->flags.large_pages)
77+
map_flags |= HV_MAP_GPA_LARGE_PAGE;
78+
79+
return hv_call_map_gpa_pages(region->partition->pt_id,
80+
region->start_gfn + page_offset,
81+
page_count, map_flags,
82+
region->pages + page_offset);
83+
}
84+
85+
int mshv_region_map(struct mshv_mem_region *region)
86+
{
87+
u32 map_flags = region->hv_map_flags;
88+
89+
return mshv_region_remap_pages(region, map_flags,
90+
0, region->nr_pages);
91+
}
92+
93+
/*
 * Drop the region's references to @page_count page structs starting at
 * @page_offset: unpin them when the region pins its backing pages, then
 * clear the corresponding slots of the pages[] array.
 */
static void mshv_region_invalidate_pages(struct mshv_mem_region *region,
					 u64 page_offset, u64 page_count)
{
	struct page **first = region->pages + page_offset;

	if (region->flags.range_pinned)
		unpin_user_pages(first, page_count);

	memset(first, 0, page_count * sizeof(*first));
}
102+
103+
void mshv_region_invalidate(struct mshv_mem_region *region)
104+
{
105+
mshv_region_invalidate_pages(region, 0, region->nr_pages);
106+
}
107+
108+
/*
 * Long-term pin the userspace pages backing @region, filling the region's
 * pages[] array.  On success also records whether the backing memory is
 * huge-page backed (checked on the first page only).  On failure, any
 * pages pinned so far are released and a negative errno is returned.
 */
int mshv_region_pin(struct mshv_mem_region *region)
{
	u64 done_count, nr_pages;
	struct page **pages;
	__u64 userspace_addr;
	int ret;

	/*
	 * done_count advances by the number of pages each batch actually
	 * pinned (ret), so a short pin simply resumes where it left off.
	 * NOTE(review): assumes pin_user_pages_fast() never returns 0 for
	 * a nonzero request, which would loop forever here — TODO confirm.
	 */
	for (done_count = 0; done_count < region->nr_pages; done_count += ret) {
		pages = region->pages + done_count;
		userspace_addr = region->start_uaddr +
				 done_count * HV_HYP_PAGE_SIZE;
		nr_pages = min(region->nr_pages - done_count,
			       MSHV_PIN_PAGES_BATCH_SIZE);

		/*
		 * Pinning assuming 4k pages works for large pages too.
		 * All page structs within the large page are returned.
		 *
		 * Pin requests are batched because pin_user_pages_fast
		 * with the FOLL_LONGTERM flag does a large temporary
		 * allocation of contiguous memory.
		 */
		ret = pin_user_pages_fast(userspace_addr, nr_pages,
					  FOLL_WRITE | FOLL_LONGTERM,
					  pages);
		if (ret < 0)
			goto release_pages;
	}

	/* Large-page mapping is decided from the first backing page. */
	if (PageHuge(region->pages[0]))
		region->flags.large_pages = true;

	return 0;

release_pages:
	/* Unpin only the pages that were successfully pinned. */
	mshv_region_invalidate_pages(region, 0, done_count);
	return ret;
}
146+
147+
/*
 * Tear down @region: unlink it from the partition, unmap its GPA range
 * from the guest, release the page references and free the descriptor.
 *
 * For encrypted partitions the pages must be made host-accessible again
 * *before* unpinning; if that hypercall fails the region is intentionally
 * leaked, since unpinning inaccessible pages would crash the host.
 */
void mshv_region_destroy(struct mshv_mem_region *region)
{
	struct mshv_partition *partition = region->partition;
	u32 unmap_flags = 0;
	int ret;

	/* Remove the region from the partition's region list. */
	hlist_del(&region->hnode);

	if (mshv_partition_encrypted(partition)) {
		ret = mshv_region_share(region);
		if (ret) {
			pt_err(partition,
			       "Failed to regain access to memory, unpinning user pages will fail and crash the host error: %d\n",
			       ret);
			/* Deliberate leak: safer than touching the pages. */
			return;
		}
	}

	if (region->flags.large_pages)
		unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;

	/* ignore unmap failures and continue as process may be exiting */
	hv_call_unmap_gpa_pages(partition->pt_id, region->start_gfn,
				region->nr_pages, unmap_flags);

	mshv_region_invalidate(region);

	vfree(region);
}

drivers/hv/mshv_root.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -312,4 +312,14 @@ extern struct mshv_root mshv_root;
312312
extern enum hv_scheduler_type hv_scheduler_type;
313313
extern u8 * __percpu *hv_synic_eventring_tail;
314314

315+
struct mshv_mem_region *mshv_region_create(u64 guest_pfn, u64 nr_pages,
316+
u64 uaddr, u32 flags,
317+
bool is_mmio);
318+
int mshv_region_share(struct mshv_mem_region *region);
319+
int mshv_region_unshare(struct mshv_mem_region *region);
320+
int mshv_region_map(struct mshv_mem_region *region);
321+
void mshv_region_invalidate(struct mshv_mem_region *region);
322+
int mshv_region_pin(struct mshv_mem_region *region);
323+
void mshv_region_destroy(struct mshv_mem_region *region);
324+
315325
#endif /* _MSHV_ROOT_H_ */

0 commit comments

Comments
 (0)