Commit 7c350ea

Quentin Perret authored and Marc Zyngier committed
KVM: arm64: Unify MMIO and mem host stage-2 pools
We currently maintain two separate memory pools for the host stage-2: one for pages used in the page-table when mapping memory regions, and the other to map MMIO regions. The former is large enough to map all of memory with page granularity, and the latter can cover an arbitrary portion of IPA space, but allows pages to be 'recycled'.

However, this split makes accounting difficult to manage, as pages at intermediate levels of the page-table may be used to map both memory and MMIO regions. Simplify the scheme by merging both pools into one. This means we can now hit the -ENOMEM case in the memory abort path, but we're still guaranteed forward progress in the worst case by unmapping MMIO regions. On the plus side, this also means we can usually map a lot more MMIO space at once if memory ranges happen to be mapped with block mappings.

Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210608114518.748712-5-qperret@google.com
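In outline, the memory abort path after this change behaves as in the sketch below, condensed from the host_stage2_idmap() hunk further down. This is an illustration, not the literal kernel function: locking, the find_mem_range() lookup, and the host_stage2_idmap_sketch name are mine.

/*
 * Sketch only, condensed from the host_stage2_idmap() diff below.
 */
static int host_stage2_idmap_sketch(u64 start, u64 end,
                                    enum kvm_pgtable_prot prot)
{
        int ret;

        /* First attempt: the single host_s2_pool backs memory and MMIO. */
        ret = __host_stage2_idmap(start, end, prot);
        if (ret != -ENOMEM)
                return ret;

        /* Out of pages: recycle them by unmapping all MMIO regions... */
        ret = host_stage2_unmap_dev_all();
        if (ret)
                return ret;

        /* ...and retry, which guarantees forward progress in the worst case. */
        return __host_stage2_idmap(start, end, prot);
}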
1 parent 914cde5 commit 7c350ea

5 files changed: 32 additions & 48 deletions

arch/arm64/kvm/hyp/include/nvhe/mem_protect.h

Lines changed: 1 addition & 1 deletion

@@ -23,7 +23,7 @@ extern struct host_kvm host_kvm;
 int __pkvm_prot_finalize(void);
 int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end);
 
-int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool);
+int kvm_host_prepare_stage2(void *pgt_pool_base);
 void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 
 static __always_inline void __load_host_stage2(void)

arch/arm64/kvm/hyp/include/nvhe/mm.h

Lines changed: 7 additions & 6 deletions

@@ -78,19 +78,20 @@ static inline unsigned long hyp_s1_pgtable_pages(void)
 	return res;
 }
 
-static inline unsigned long host_s2_mem_pgtable_pages(void)
+static inline unsigned long host_s2_pgtable_pages(void)
 {
+	unsigned long res;
+
 	/*
 	 * Include an extra 16 pages to safely upper-bound the worst case of
 	 * concatenated pgds.
 	 */
-	return __hyp_pgtable_total_pages() + 16;
-}
+	res = __hyp_pgtable_total_pages() + 16;
 
-static inline unsigned long host_s2_dev_pgtable_pages(void)
-{
 	/* Allow 1 GiB for MMIO mappings */
-	return __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+
+	return res;
 }
 
 #endif /* __KVM_HYP_MM_H */
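For scale, here is the arithmetic behind the 'Allow 1 GiB for MMIO mappings' line as a standalone worked example. This is my own illustration, not kernel code: it assumes a 4 KiB granule, 512 entries per table, and a 4-level walk, mirroring the worst-case per-level provisioning that __hyp_pgtable_max_pages() performs.

#include <stdio.h>

/*
 * Worked example (illustration only): worst-case page-table pages
 * needed to map 1 GiB of MMIO with 4 KiB pages, assuming 512 entries
 * per table and a 4-level walk.
 */
int main(void)
{
        unsigned long nr_pages = (1UL << 30) >> 12;     /* SZ_1G >> PAGE_SHIFT = 262144 */
        unsigned long total = 0;
        int level;

        for (level = 0; level < 4; level++) {
                nr_pages = (nr_pages + 511) / 512;      /* tables needed one level up */
                total += nr_pages;
        }

        /* Prints 515: 512 + 1 + 1 + 1 pages, i.e. roughly 2 MiB of tables. */
        printf("%lu\n", total);
        return 0;
}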

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 18 additions & 28 deletions

@@ -23,8 +23,7 @@
 extern unsigned long hyp_nr_cpus;
 struct host_kvm host_kvm;
 
-static struct hyp_pool host_s2_mem;
-static struct hyp_pool host_s2_dev;
+static struct hyp_pool host_s2_pool;
 
 /*
  * Copies of the host's CPU features registers holding sanitized values.
@@ -36,28 +35,22 @@ static const u8 pkvm_hyp_id = 1;
 
 static void *host_s2_zalloc_pages_exact(size_t size)
 {
-	return hyp_alloc_pages(&host_s2_mem, get_order(size));
+	return hyp_alloc_pages(&host_s2_pool, get_order(size));
 }
 
 static void *host_s2_zalloc_page(void *pool)
 {
 	return hyp_alloc_pages(pool, 0);
 }
 
-static int prepare_s2_pools(void *mem_pgt_pool, void *dev_pgt_pool)
+static int prepare_s2_pool(void *pgt_pool_base)
 {
 	unsigned long nr_pages, pfn;
 	int ret;
 
-	pfn = hyp_virt_to_pfn(mem_pgt_pool);
-	nr_pages = host_s2_mem_pgtable_pages();
-	ret = hyp_pool_init(&host_s2_mem, pfn, nr_pages, 0);
-	if (ret)
-		return ret;
-
-	pfn = hyp_virt_to_pfn(dev_pgt_pool);
-	nr_pages = host_s2_dev_pgtable_pages();
-	ret = hyp_pool_init(&host_s2_dev, pfn, nr_pages, 0);
+	pfn = hyp_virt_to_pfn(pgt_pool_base);
+	nr_pages = host_s2_pgtable_pages();
+	ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
 	if (ret)
 		return ret;
 
@@ -86,15 +79,15 @@ static void prepare_host_vtcr(void)
 			  id_aa64mmfr1_el1_sys_val, phys_shift);
 }
 
-int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool)
+int kvm_host_prepare_stage2(void *pgt_pool_base)
 {
 	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
 	int ret;
 
 	prepare_host_vtcr();
 	hyp_spin_lock_init(&host_kvm.lock);
 
-	ret = prepare_s2_pools(mem_pgt_pool, dev_pgt_pool);
+	ret = prepare_s2_pool(pgt_pool_base);
 	if (ret)
 		return ret;
 
@@ -199,19 +192,17 @@ static bool range_is_memory(u64 start, u64 end)
 }
 
 static inline int __host_stage2_idmap(u64 start, u64 end,
-				      enum kvm_pgtable_prot prot,
-				      struct hyp_pool *pool)
+				      enum kvm_pgtable_prot prot)
 {
 	return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
-				      prot, pool);
+				      prot, &host_s2_pool);
 }
 
 static int host_stage2_idmap(u64 addr)
 {
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
 	struct kvm_mem_range range;
 	bool is_memory = find_mem_range(addr, &range);
-	struct hyp_pool *pool = is_memory ? &host_s2_mem : &host_s2_dev;
 	int ret;
 
 	if (is_memory)
@@ -222,22 +213,21 @@ static int host_stage2_idmap(u64 addr)
 	if (ret)
 		goto unlock;
 
-	ret = __host_stage2_idmap(range.start, range.end, prot, pool);
-	if (is_memory || ret != -ENOMEM)
+	ret = __host_stage2_idmap(range.start, range.end, prot);
+	if (ret != -ENOMEM)
 		goto unlock;
 
 	/*
-	 * host_s2_mem has been provided with enough pages to cover all of
-	 * memory with page granularity, so we should never hit the ENOMEM case.
-	 * However, it is difficult to know how much of the MMIO range we will
-	 * need to cover upfront, so we may need to 'recycle' the pages if we
-	 * run out.
+	 * The pool has been provided with enough pages to cover all of memory
+	 * with page granularity, but it is difficult to know how much of the
+	 * MMIO range we will need to cover upfront, so we may need to 'recycle'
+	 * the pages if we run out.
 	 */
 	ret = host_stage2_unmap_dev_all();
	if (ret)
 		goto unlock;
 
-	ret = __host_stage2_idmap(range.start, range.end, prot, pool);
+	ret = __host_stage2_idmap(range.start, range.end, prot);
 
 unlock:
 	hyp_spin_unlock(&host_kvm.lock);
@@ -258,7 +248,7 @@ int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
 
 	hyp_spin_lock(&host_kvm.lock);
 	ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start, end - start,
-					   &host_s2_mem, pkvm_hyp_id);
+					   &host_s2_pool, pkvm_hyp_id);
 	hyp_spin_unlock(&host_kvm.lock);
 
 	return ret != -EAGAIN ? ret : 0;
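For context, the recycling helper called above, host_stage2_unmap_dev_all(), is untouched by this patch and so does not appear in the diff. Roughly, it walks the hyp memblock list and unmaps every gap between memory regions, returning the MMIO page-table pages to host_s2_pool. A sketch of that shape follows; the body is assumed from the nVHE code of this era, not taken from this commit.

/* Sketch only -- not part of this diff. */
static int host_stage2_unmap_dev_all(void)
{
        struct kvm_pgtable *pgt = &host_kvm.pgt;
        struct memblock_region *reg;
        u64 addr = 0;
        int i, ret;

        /* Unmap every gap between memory regions, i.e. all MMIO ranges. */
        for (i = 0; i < hyp_memblock_nr; i++) {
                reg = &hyp_memory[i];
                ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);
                if (ret)
                        return ret;
                addr = reg->base + reg->size;
        }

        /* Finally, unmap everything above the last memory region. */
        return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
}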

arch/arm64/kvm/hyp/nvhe/setup.c

Lines changed: 5 additions & 11 deletions

@@ -24,8 +24,7 @@ unsigned long hyp_nr_cpus;
 
 static void *vmemmap_base;
 static void *hyp_pgt_base;
-static void *host_s2_mem_pgt_base;
-static void *host_s2_dev_pgt_base;
+static void *host_s2_pgt_base;
 static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
 
 static int divide_memory_pool(void *virt, unsigned long size)
@@ -45,14 +44,9 @@ static int divide_memory_pool(void *virt, unsigned long size)
 	if (!hyp_pgt_base)
 		return -ENOMEM;
 
-	nr_pages = host_s2_mem_pgtable_pages();
-	host_s2_mem_pgt_base = hyp_early_alloc_contig(nr_pages);
-	if (!host_s2_mem_pgt_base)
-		return -ENOMEM;
-
-	nr_pages = host_s2_dev_pgtable_pages();
-	host_s2_dev_pgt_base = hyp_early_alloc_contig(nr_pages);
-	if (!host_s2_dev_pgt_base)
+	nr_pages = host_s2_pgtable_pages();
+	host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
+	if (!host_s2_pgt_base)
 		return -ENOMEM;
 
 	return 0;
@@ -158,7 +152,7 @@ void __noreturn __pkvm_init_finalise(void)
 	if (ret)
 		goto out;
 
-	ret = kvm_host_prepare_stage2(host_s2_mem_pgt_base, host_s2_dev_pgt_base);
+	ret = kvm_host_prepare_stage2(host_s2_pgt_base);
 	if (ret)
 		goto out;
 

arch/arm64/kvm/hyp/reserved_mem.c

Lines changed: 1 addition & 2 deletions

@@ -71,8 +71,7 @@ void __init kvm_hyp_reserve(void)
 	}
 
 	hyp_mem_pages += hyp_s1_pgtable_pages();
-	hyp_mem_pages += host_s2_mem_pgtable_pages();
-	hyp_mem_pages += host_s2_dev_pgtable_pages();
+	hyp_mem_pages += host_s2_pgtable_pages();
 
 	/*
 	 * The hyp_vmemmap needs to be backed by pages, but these pages
