Commit 46c8862

Marc Zyngier authored and committed
Merge branch kvm-arm64/mmu/reduce-vmemmap-overhead into kvmarm-master/next
Host stage-2 optimisations from Quentin Perret

* kvm-arm64/mmu/reduce-vmemmap-overhead:
  KVM: arm64: Use less bits for hyp_page refcount
  KVM: arm64: Use less bits for hyp_page order
  KVM: arm64: Remove hyp_pool pointer from struct hyp_page
  KVM: arm64: Unify MMIO and mem host stage-2 pools
  KVM: arm64: Remove list_head from hyp_page
  KVM: arm64: Use refcount at hyp to check page availability
  KVM: arm64: Move hyp_pool locking out of refcount helpers
2 parents 32ab5a5 + 6929586 commit 46c8862

8 files changed: 145 additions and 127 deletions
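Most of the savings come from shrinking struct hyp_page, one of which the hyp vmemmap keeps per page of memory. A rough, self-contained sketch of the before/after footprint (the _old/_new struct names and the hyp_vmemmap_bytes() helper are illustrative only; layout and padding are assumptions for 64-bit arm64):

struct hyp_pool;	/* opaque, as in the old nvhe/memory.h */
struct list_head { struct list_head *next, *prev; };

/* Before the series: 4 + 4 + 8 + 16 = 32 bytes per tracked page. */
struct hyp_page_old {
	unsigned int refcount;
	unsigned int order;
	struct hyp_pool *pool;
	struct list_head node;
};

/* After the series: two unsigned shorts, 4 bytes per tracked page. */
struct hyp_page_new {
	unsigned short refcount;
	unsigned short order;
};

/* Illustrative: vmemmap cost = (memory / page size) * sizeof(entry).
 * e.g. 4 GiB of RAM with 4 KiB pages: 32 MiB before, 4 MiB after. */
static unsigned long hyp_vmemmap_bytes(unsigned long mem, unsigned long psz,
				       unsigned long entry_size)
{
	return (mem / psz) * entry_size;
}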

arch/arm64/kvm/hyp/include/nvhe/gfp.h

Lines changed: 5 additions & 40 deletions
@@ -7,7 +7,7 @@
 #include <nvhe/memory.h>
 #include <nvhe/spinlock.h>
 
-#define HYP_NO_ORDER	UINT_MAX
+#define HYP_NO_ORDER	USHRT_MAX
 
 struct hyp_pool {
 	/*
@@ -19,48 +19,13 @@ struct hyp_pool {
 	struct list_head free_area[MAX_ORDER];
 	phys_addr_t range_start;
 	phys_addr_t range_end;
-	unsigned int max_order;
+	unsigned short max_order;
 };
 
-static inline void hyp_page_ref_inc(struct hyp_page *p)
-{
-	struct hyp_pool *pool = hyp_page_to_pool(p);
-
-	hyp_spin_lock(&pool->lock);
-	p->refcount++;
-	hyp_spin_unlock(&pool->lock);
-}
-
-static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
-{
-	struct hyp_pool *pool = hyp_page_to_pool(p);
-	int ret;
-
-	hyp_spin_lock(&pool->lock);
-	p->refcount--;
-	ret = (p->refcount == 0);
-	hyp_spin_unlock(&pool->lock);
-
-	return ret;
-}
-
-static inline void hyp_set_page_refcounted(struct hyp_page *p)
-{
-	struct hyp_pool *pool = hyp_page_to_pool(p);
-
-	hyp_spin_lock(&pool->lock);
-	if (p->refcount) {
-		hyp_spin_unlock(&pool->lock);
-		BUG();
-	}
-	p->refcount = 1;
-	hyp_spin_unlock(&pool->lock);
-}
-
 /* Allocation */
-void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order);
-void hyp_get_page(void *addr);
-void hyp_put_page(void *addr);
+void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
+void hyp_get_page(struct hyp_pool *pool, void *addr);
+void hyp_put_page(struct hyp_pool *pool, void *addr);
 
 /* Used pages cannot be freed */
 int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
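The helpers deleted from gfp.h above are not lost: per the "Move hyp_pool locking out of refcount helpers" subject, they move next to the buddy allocator, which takes the pool lock once per operation instead of once per refcount update. A hedged sketch of the resulting shape (the placement in page_alloc.c and the __hyp_attach_page() internal helper are assumptions, not visible in this hunk):

/* Callers must hold pool->lock; the helpers themselves no longer lock. */
static void hyp_page_ref_inc(struct hyp_page *p)
{
	p->refcount++;
}

static int hyp_page_ref_dec_and_test(struct hyp_page *p)
{
	p->refcount--;
	return (p->refcount == 0);
}

/* The exported pair takes the pool explicitly and locks once: */
void hyp_get_page(struct hyp_pool *pool, void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	hyp_spin_lock(&pool->lock);
	hyp_page_ref_inc(p);
	hyp_spin_unlock(&pool->lock);
}

void hyp_put_page(struct hyp_pool *pool, void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	hyp_spin_lock(&pool->lock);
	if (hyp_page_ref_dec_and_test(p))
		__hyp_attach_page(pool, p);	/* back onto the free lists */
	hyp_spin_unlock(&pool->lock);
}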

arch/arm64/kvm/hyp/include/nvhe/mem_protect.h

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@ extern struct host_kvm host_kvm;
 int __pkvm_prot_finalize(void);
 int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end);
 
-int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool);
+int kvm_host_prepare_stage2(void *pgt_pool_base);
 void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 
 static __always_inline void __load_host_stage2(void)
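On the caller side, the EL2 setup code now only needs to carve out and hand over a single page-table pool. A speculative sketch of what that caller might look like (host_s2_pgt_base, divide_memory_pool() and the early-allocator calls are assumptions about nvhe/setup.c, which is not part of this diff):

static void *host_s2_pgt_base;

static int divide_memory_pool(void *virt, unsigned long size)
{
	unsigned long nr_pages;

	hyp_early_alloc_init(virt, size);

	nr_pages = host_s2_pgtable_pages();	/* one unified bound */
	host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!host_s2_pgt_base)
		return -ENOMEM;

	return 0;
}

/* ...later, during finalisation: */
static int prepare_host_stage2(void)
{
	return kvm_host_prepare_stage2(host_s2_pgt_base);
}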

arch/arm64/kvm/hyp/include/nvhe/memory.h

Lines changed: 2 additions & 5 deletions
@@ -7,12 +7,9 @@
 
 #include <linux/types.h>
 
-struct hyp_pool;
 struct hyp_page {
-	unsigned int refcount;
-	unsigned int order;
-	struct hyp_pool *pool;
-	struct list_head node;
+	unsigned short refcount;
+	unsigned short order;
 };
 
 extern u64 __hyp_vmemmap;
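Dropping the node field works because a free page's own memory is, by definition, unused, so the buddy allocator can keep its free-list node inside the page itself rather than in the vmemmap. A sketch of the idea (hyp_page_to_virt() is the existing vmemmap-to-VA helper; the node helper's name is made up here):

/* Free-list nodes live in the free pages themselves: */
static struct list_head *hyp_page_to_node(struct hyp_page *p)
{
	return hyp_page_to_virt(p);
}

static void hyp_attach_page(struct hyp_pool *pool, struct hyp_page *p)
{
	/* Safe: nobody holds a reference, so the page memory is ours. */
	list_add_tail(hyp_page_to_node(p), &pool->free_area[p->order]);
}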

arch/arm64/kvm/hyp/include/nvhe/mm.h

Lines changed: 7 additions & 6 deletions
@@ -78,19 +78,20 @@ static inline unsigned long hyp_s1_pgtable_pages(void)
 	return res;
 }
 
-static inline unsigned long host_s2_mem_pgtable_pages(void)
+static inline unsigned long host_s2_pgtable_pages(void)
 {
+	unsigned long res;
+
 	/*
 	 * Include an extra 16 pages to safely upper-bound the worst case of
 	 * concatenated pgds.
 	 */
-	return __hyp_pgtable_total_pages() + 16;
-}
+	res = __hyp_pgtable_total_pages() + 16;
 
-static inline unsigned long host_s2_dev_pgtable_pages(void)
-{
 	/* Allow 1 GiB for MMIO mappings */
-	return __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+
+	return res;
 }
 
 #endif /* __KVM_HYP_MM_H */
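The 1 GiB MMIO provision leans on __hyp_pgtable_max_pages() to upper-bound the page-table pages needed for a given number of mapped pages. A minimal sketch of that worst-case arithmetic (assuming 4 KiB granules with 512 entries per table over four levels; the real helper uses the kernel's pgtable macros):

/* Worst case: every level needs fresh tables, each table covering
 * 512 entries of the level below. */
static unsigned long pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0;
	int level;

	for (level = 0; level < 4; level++) {
		nr_pages = (nr_pages + 511) / 512;	/* DIV_ROUND_UP */
		total += nr_pages;
	}

	return total;
}

/* e.g. SZ_1G >> 12 = 262144 PTEs: 512 PTE tables + 1 PMD + 1 PUD
 * + 1 PGD = 515 page-table pages. */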

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 30 additions & 30 deletions
@@ -23,8 +23,7 @@
 extern unsigned long hyp_nr_cpus;
 struct host_kvm host_kvm;
 
-static struct hyp_pool host_s2_mem;
-static struct hyp_pool host_s2_dev;
+static struct hyp_pool host_s2_pool;
 
 /*
  * Copies of the host's CPU features registers holding sanitized values.
@@ -36,28 +35,32 @@ static const u8 pkvm_hyp_id = 1;
 
 static void *host_s2_zalloc_pages_exact(size_t size)
 {
-	return hyp_alloc_pages(&host_s2_mem, get_order(size));
+	return hyp_alloc_pages(&host_s2_pool, get_order(size));
 }
 
 static void *host_s2_zalloc_page(void *pool)
 {
 	return hyp_alloc_pages(pool, 0);
 }
 
-static int prepare_s2_pools(void *mem_pgt_pool, void *dev_pgt_pool)
+static void host_s2_get_page(void *addr)
+{
+	hyp_get_page(&host_s2_pool, addr);
+}
+
+static void host_s2_put_page(void *addr)
+{
+	hyp_put_page(&host_s2_pool, addr);
+}
+
+static int prepare_s2_pool(void *pgt_pool_base)
 {
 	unsigned long nr_pages, pfn;
 	int ret;
 
-	pfn = hyp_virt_to_pfn(mem_pgt_pool);
-	nr_pages = host_s2_mem_pgtable_pages();
-	ret = hyp_pool_init(&host_s2_mem, pfn, nr_pages, 0);
-	if (ret)
-		return ret;
-
-	pfn = hyp_virt_to_pfn(dev_pgt_pool);
-	nr_pages = host_s2_dev_pgtable_pages();
-	ret = hyp_pool_init(&host_s2_dev, pfn, nr_pages, 0);
+	pfn = hyp_virt_to_pfn(pgt_pool_base);
+	nr_pages = host_s2_pgtable_pages();
+	ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
 	if (ret)
 		return ret;
 
@@ -67,8 +70,8 @@ static int prepare_s2_pools(void *mem_pgt_pool, void *dev_pgt_pool)
 		.phys_to_virt = hyp_phys_to_virt,
 		.virt_to_phys = hyp_virt_to_phys,
 		.page_count = hyp_page_count,
-		.get_page = hyp_get_page,
-		.put_page = hyp_put_page,
+		.get_page = host_s2_get_page,
+		.put_page = host_s2_put_page,
 	};
 
 	return 0;
@@ -86,15 +89,15 @@ static void prepare_host_vtcr(void)
 			  id_aa64mmfr1_el1_sys_val, phys_shift);
 }
 
-int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool)
+int kvm_host_prepare_stage2(void *pgt_pool_base)
 {
 	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
 	int ret;
 
 	prepare_host_vtcr();
 	hyp_spin_lock_init(&host_kvm.lock);
 
-	ret = prepare_s2_pools(mem_pgt_pool, dev_pgt_pool);
+	ret = prepare_s2_pool(pgt_pool_base);
 	if (ret)
 		return ret;
 
@@ -199,19 +202,17 @@ static bool range_is_memory(u64 start, u64 end)
 }
 
 static inline int __host_stage2_idmap(u64 start, u64 end,
-				      enum kvm_pgtable_prot prot,
-				      struct hyp_pool *pool)
+				      enum kvm_pgtable_prot prot)
 {
 	return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
-				      prot, pool);
+				      prot, &host_s2_pool);
 }
 
 static int host_stage2_idmap(u64 addr)
 {
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
 	struct kvm_mem_range range;
 	bool is_memory = find_mem_range(addr, &range);
-	struct hyp_pool *pool = is_memory ? &host_s2_mem : &host_s2_dev;
 	int ret;
 
 	if (is_memory)
@@ -222,22 +223,21 @@ static int host_stage2_idmap(u64 addr)
 	if (ret)
 		goto unlock;
 
-	ret = __host_stage2_idmap(range.start, range.end, prot, pool);
-	if (is_memory || ret != -ENOMEM)
+	ret = __host_stage2_idmap(range.start, range.end, prot);
+	if (ret != -ENOMEM)
 		goto unlock;
 
 	/*
-	 * host_s2_mem has been provided with enough pages to cover all of
-	 * memory with page granularity, so we should never hit the ENOMEM case.
-	 * However, it is difficult to know how much of the MMIO range we will
-	 * need to cover upfront, so we may need to 'recycle' the pages if we
-	 * run out.
+	 * The pool has been provided with enough pages to cover all of memory
+	 * with page granularity, but it is difficult to know how much of the
+	 * MMIO range we will need to cover upfront, so we may need to 'recycle'
+	 * the pages if we run out.
 	 */
 	ret = host_stage2_unmap_dev_all();
 	if (ret)
 		goto unlock;
 
-	ret = __host_stage2_idmap(range.start, range.end, prot, pool);
+	ret = __host_stage2_idmap(range.start, range.end, prot);
 
 unlock:
 	hyp_spin_unlock(&host_kvm.lock);
@@ -258,7 +258,7 @@ int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
 
 	hyp_spin_lock(&host_kvm.lock);
 	ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start, end - start,
-					   &host_s2_mem, pkvm_hyp_id);
+					   &host_s2_pool, pkvm_hyp_id);
 	hyp_spin_unlock(&host_kvm.lock);
 
 	return ret != -EAGAIN ? ret : 0;
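The host_s2_get_page()/host_s2_put_page() adapters above exist because the page-table library's callbacks stay pool-agnostic while hyp_get_page()/hyp_put_page() now take the pool explicitly. A reduced sketch of the pattern (the two-field ops struct stands in for the fuller struct kvm_pgtable_mm_ops):

/* The pgtable code only sees pool-agnostic hooks... */
struct pgtable_mm_ops_sketch {
	void (*get_page)(void *addr);
	void (*put_page)(void *addr);
};

/* ...so each user binds its file-local pool with thin wrappers, as
 * mem_protect.c does above: */
static struct hyp_pool a_pool;

static void a_get_page(void *addr) { hyp_get_page(&a_pool, addr); }
static void a_put_page(void *addr) { hyp_put_page(&a_pool, addr); }

static const struct pgtable_mm_ops_sketch a_ops = {
	.get_page	= a_get_page,
	.put_page	= a_put_page,
};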
