 extern unsigned long hyp_nr_cpus;
 struct host_kvm host_kvm;
 
-static struct hyp_pool host_s2_mem;
-static struct hyp_pool host_s2_dev;
+static struct hyp_pool host_s2_pool;
 
 /*
  * Copies of the host's CPU features registers holding sanitized values.
@@ -36,28 +35,32 @@ static const u8 pkvm_hyp_id = 1;
 
 static void *host_s2_zalloc_pages_exact(size_t size)
 {
-	return hyp_alloc_pages(&host_s2_mem, get_order(size));
+	return hyp_alloc_pages(&host_s2_pool, get_order(size));
 }
 
 static void *host_s2_zalloc_page(void *pool)
 {
 	return hyp_alloc_pages(pool, 0);
 }
 
-static int prepare_s2_pools(void *mem_pgt_pool, void *dev_pgt_pool)
+static void host_s2_get_page(void *addr)
+{
+	hyp_get_page(&host_s2_pool, addr);
+}
+
+static void host_s2_put_page(void *addr)
+{
+	hyp_put_page(&host_s2_pool, addr);
+}
+
+static int prepare_s2_pool(void *pgt_pool_base)
 {
 	unsigned long nr_pages, pfn;
 	int ret;
 
-	pfn = hyp_virt_to_pfn(mem_pgt_pool);
-	nr_pages = host_s2_mem_pgtable_pages();
-	ret = hyp_pool_init(&host_s2_mem, pfn, nr_pages, 0);
-	if (ret)
-		return ret;
-
-	pfn = hyp_virt_to_pfn(dev_pgt_pool);
-	nr_pages = host_s2_dev_pgtable_pages();
-	ret = hyp_pool_init(&host_s2_dev, pfn, nr_pages, 0);
+	pfn = hyp_virt_to_pfn(pgt_pool_base);
+	nr_pages = host_s2_pgtable_pages();
+	ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
 	if (ret)
 		return ret;
 
@@ -67,8 +70,8 @@ static int prepare_s2_pools(void *mem_pgt_pool, void *dev_pgt_pool)
 		.phys_to_virt = hyp_phys_to_virt,
 		.virt_to_phys = hyp_virt_to_phys,
 		.page_count = hyp_page_count,
-		.get_page = hyp_get_page,
-		.put_page = hyp_put_page,
+		.get_page = host_s2_get_page,
+		.put_page = host_s2_put_page,
 	};
 
 	return 0;
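Stitching the `+` sides of the two hunks above into one readable piece, the post-patch pool setup looks roughly like this. This is a reconstruction for reference, not new code: the enclosing `host_kvm.mm_ops = ...` assignment and its first two members (`.zalloc_pages_exact`, `.zalloc_page`) fall outside the hunk context shown here and are assumed from the surrounding file.

```c
/* A single pool now backs all host stage-2 page-table pages. */
static struct hyp_pool host_s2_pool;

/*
 * hyp_get_page()/hyp_put_page() take an explicit pool argument, so thin
 * wrappers are needed to fit the pool-less get_page/put_page callback
 * signatures expected by struct kvm_pgtable_mm_ops.
 */
static void host_s2_get_page(void *addr)
{
	hyp_get_page(&host_s2_pool, addr);
}

static void host_s2_put_page(void *addr)
{
	hyp_put_page(&host_s2_pool, addr);
}

static int prepare_s2_pool(void *pgt_pool_base)
{
	unsigned long nr_pages, pfn;
	int ret;

	pfn = hyp_virt_to_pfn(pgt_pool_base);
	nr_pages = host_s2_pgtable_pages();
	ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
	if (ret)
		return ret;

	host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_pages_exact = host_s2_zalloc_pages_exact, /* assumed: outside hunk context */
		.zalloc_page = host_s2_zalloc_page,               /* assumed: outside hunk context */
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.page_count = hyp_page_count,
		.get_page = host_s2_get_page,
		.put_page = host_s2_put_page,
	};

	return 0;
}
```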
@@ -86,15 +89,15 @@ static void prepare_host_vtcr(void)
 					  id_aa64mmfr1_el1_sys_val, phys_shift);
 }
 
-int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool)
+int kvm_host_prepare_stage2(void *pgt_pool_base)
 {
 	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
 	int ret;
 
 	prepare_host_vtcr();
 	hyp_spin_lock_init(&host_kvm.lock);
 
-	ret = prepare_s2_pools(mem_pgt_pool, dev_pgt_pool);
+	ret = prepare_s2_pool(pgt_pool_base);
 	if (ret)
 		return ret;
 
@@ -199,19 +202,17 @@ static bool range_is_memory(u64 start, u64 end)
 }
 
 static inline int __host_stage2_idmap(u64 start, u64 end,
-				      enum kvm_pgtable_prot prot,
-				      struct hyp_pool *pool)
+				      enum kvm_pgtable_prot prot)
 {
 	return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
-				      prot, pool);
+				      prot, &host_s2_pool);
 }
 
 static int host_stage2_idmap(u64 addr)
 {
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
 	struct kvm_mem_range range;
 	bool is_memory = find_mem_range(addr, &range);
-	struct hyp_pool *pool = is_memory ? &host_s2_mem : &host_s2_dev;
 	int ret;
 
 	if (is_memory)
@@ -222,22 +223,21 @@ static int host_stage2_idmap(u64 addr)
 	if (ret)
 		goto unlock;
 
-	ret = __host_stage2_idmap(range.start, range.end, prot, pool);
-	if (is_memory || ret != -ENOMEM)
+	ret = __host_stage2_idmap(range.start, range.end, prot);
+	if (ret != -ENOMEM)
 		goto unlock;
 
 	/*
-	 * host_s2_mem has been provided with enough pages to cover all of
-	 * memory with page granularity, so we should never hit the ENOMEM case.
-	 * However, it is difficult to know how much of the MMIO range we will
-	 * need to cover upfront, so we may need to 'recycle' the pages if we
-	 * run out.
+	 * The pool has been provided with enough pages to cover all of memory
+	 * with page granularity, but it is difficult to know how much of the
+	 * MMIO range we will need to cover upfront, so we may need to 'recycle'
+	 * the pages if we run out.
 	 */
 	ret = host_stage2_unmap_dev_all();
 	if (ret)
 		goto unlock;
 
-	ret = __host_stage2_idmap(range.start, range.end, prot, pool);
+	ret = __host_stage2_idmap(range.start, range.end, prot);
 
 unlock:
 	hyp_spin_unlock(&host_kvm.lock);
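The hunk above changes the fallback semantics: since one pool now serves both RAM and MMIO mappings, any -ENOMEM (not just one on the device path) triggers a teardown of the device mappings and a single retry. A tiny self-contained toy model of that retry-after-reclaim pattern, with all names illustrative rather than kernel API, might look like this:

```c
#include <errno.h>
#include <stdio.h>

static int pool_free_pages = 1;	/* pretend the pool is nearly exhausted */
static int dev_pages_in_use = 3;	/* pages currently held by MMIO mappings */

static int map_range(int pages_needed)
{
	if (pages_needed > pool_free_pages)
		return -ENOMEM;
	pool_free_pages -= pages_needed;
	return 0;
}

static int unmap_dev_all(void)
{
	/* Tearing down the MMIO mappings returns their pages to the pool. */
	pool_free_pages += dev_pages_in_use;
	dev_pages_in_use = 0;
	return 0;
}

static int idmap(int pages_needed)
{
	int ret = map_range(pages_needed);

	/* Any failure other than pool exhaustion is final. */
	if (ret != -ENOMEM)
		return ret;
	ret = unmap_dev_all();
	if (ret)
		return ret;
	return map_range(pages_needed);	/* exactly one retry */
}

int main(void)
{
	printf("idmap -> %d (0 on success)\n", idmap(2));
	return 0;
}
```

Here the first mapping attempt fails with -ENOMEM, the reclaim frees three pages, and the retry succeeds, mirroring the control flow of host_stage2_idmap() after this patch.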
@@ -258,7 +258,7 @@ int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
 
 	hyp_spin_lock(&host_kvm.lock);
 	ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start, end - start,
-					   &host_s2_mem, pkvm_hyp_id);
+					   &host_s2_pool, pkvm_hyp_id);
 	hyp_spin_unlock(&host_kvm.lock);
 
 	return ret != -EAGAIN ? ret : 0;
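For completeness, the post-patch body of __pkvm_mark_hyp() around this hunk reads roughly as below; the function's opening checks fall outside the diff and are elided, and the comment on -EAGAIN is an interpretation rather than something this diff states:

```c
int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
{
	int ret;

	/* ... opening checks elided: they fall outside this diff ... */

	hyp_spin_lock(&host_kvm.lock);
	ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start, end - start,
					   &host_s2_pool, pkvm_hyp_id);
	hyp_spin_unlock(&host_kvm.lock);

	/*
	 * A -EAGAIN from the walker is swallowed and reported as success;
	 * the condition that makes it benign here is not visible in this
	 * diff (assumption).
	 */
	return ret != -EAGAIN ? ret : 0;
}
```

Note that the ownership-update path now draws its page-table pages from the same host_s2_pool as the idmap path, which is the point of the whole change.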