@@ -135,18 +135,18 @@ static void gstage_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
 	kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, addr, BIT(order), order);
 }
 
-static int gstage_set_pte(struct kvm *kvm, u32 level,
-			  struct kvm_mmu_memory_cache *pcache,
-			  gpa_t addr, const pte_t *new_pte)
+static int gstage_set_pte(struct kvm *kvm,
+			  struct kvm_mmu_memory_cache *pcache,
+			  const struct kvm_gstage_mapping *map)
 {
 	u32 current_level = gstage_pgd_levels - 1;
 	pte_t *next_ptep = (pte_t *)kvm->arch.pgd;
-	pte_t *ptep = &next_ptep[gstage_pte_index(addr, current_level)];
+	pte_t *ptep = &next_ptep[gstage_pte_index(map->addr, current_level)];
 
-	if (current_level < level)
+	if (current_level < map->level)
 		return -EINVAL;
 
-	while (current_level != level) {
+	while (current_level != map->level) {
 		if (gstage_pte_leaf(ptep))
 			return -EEXIST;
 
@@ -165,13 +165,13 @@ static int gstage_set_pte(struct kvm *kvm, u32 level,
 		}
 
 		current_level--;
-		ptep = &next_ptep[gstage_pte_index(addr, current_level)];
+		ptep = &next_ptep[gstage_pte_index(map->addr, current_level)];
 	}
 
-	if (pte_val(*ptep) != pte_val(*new_pte)) {
-		set_pte(ptep, *new_pte);
+	if (pte_val(*ptep) != pte_val(map->pte)) {
+		set_pte(ptep, map->pte);
 		if (gstage_pte_leaf(ptep))
-			gstage_remote_tlb_flush(kvm, current_level, addr);
+			gstage_remote_tlb_flush(kvm, current_level, map->addr);
 	}
 
 	return 0;
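Note: the struct kvm_gstage_mapping type these hunks switch to is not defined anywhere in this diff. The sketch below is reconstructed purely from the map->addr, map->pte, and map->level accesses in gstage_set_pte() above; the field order and exact types are assumptions, not the patch's actual definition.

/* Sketch only: layout inferred from usage in gstage_set_pte(). */
struct kvm_gstage_mapping {
	gpa_t addr;	/* guest physical address of the mapping */
	pte_t pte;	/* leaf PTE value to install */
	u32 level;	/* page-table level of the leaf; 0 = base page */
};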
@@ -181,14 +181,16 @@ static int gstage_map_page(struct kvm *kvm,
 			   struct kvm_mmu_memory_cache *pcache,
 			   gpa_t gpa, phys_addr_t hpa,
 			   unsigned long page_size,
-			   bool page_rdonly, bool page_exec)
+			   bool page_rdonly, bool page_exec,
+			   struct kvm_gstage_mapping *out_map)
 {
-	int ret;
-	u32 level = 0;
-	pte_t new_pte;
 	pgprot_t prot;
+	int ret;
 
-	ret = gstage_page_size_to_level(page_size, &level);
+	out_map->addr = gpa;
+	out_map->level = 0;
+
+	ret = gstage_page_size_to_level(page_size, &out_map->level);
 	if (ret)
 		return ret;
 
@@ -216,10 +218,10 @@ static int gstage_map_page(struct kvm *kvm,
 		else
 			prot = PAGE_WRITE;
 	}
-	new_pte = pfn_pte(PFN_DOWN(hpa), prot);
-	new_pte = pte_mkdirty(new_pte);
+	out_map->pte = pfn_pte(PFN_DOWN(hpa), prot);
+	out_map->pte = pte_mkdirty(out_map->pte);
 
-	return gstage_set_pte(kvm, level, pcache, gpa, &new_pte);
+	return gstage_set_pte(kvm, pcache, out_map);
 }
 
 enum gstage_op {
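With this hunk, gstage_map_page() both installs the mapping and reports it back through out_map, so a caller can recover the size of what was actually mapped from out_map->level. A minimal sketch of that reverse conversion follows; the helper name is hypothetical, and the 9-index-bits-per-level shift assumes the Sv39/Sv48-style G-stage layouts this file targets.

/* Hypothetical helper: bytes covered by a leaf PTE at a given level,
 * assuming 9 page-table index bits per level (4K, 2M, 1G, ...). */
static unsigned long gstage_level_to_page_size(u32 level)
{
	return 1UL << (PAGE_SHIFT + 9 * level);
}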
@@ -352,30 +354,32 @@ int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
 			     phys_addr_t hpa, unsigned long size,
 			     bool writable, bool in_atomic)
 {
-	pte_t pte;
 	int ret = 0;
 	unsigned long pfn;
 	phys_addr_t addr, end;
 	struct kvm_mmu_memory_cache pcache = {
 		.gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
 		.gfp_zero = __GFP_ZERO,
 	};
+	struct kvm_gstage_mapping map;
 
 	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
 	pfn = __phys_to_pfn(hpa);
 
 	for (addr = gpa; addr < end; addr += PAGE_SIZE) {
-		pte = pfn_pte(pfn, PAGE_KERNEL_IO);
+		map.addr = addr;
+		map.pte = pfn_pte(pfn, PAGE_KERNEL_IO);
+		map.level = 0;
 
 		if (!writable)
-			pte = pte_wrprotect(pte);
+			map.pte = pte_wrprotect(map.pte);
 
 		ret = kvm_mmu_topup_memory_cache(&pcache, gstage_pgd_levels);
 		if (ret)
 			goto out;
 
 		spin_lock(&kvm->mmu_lock);
-		ret = gstage_set_pte(kvm, 0, &pcache, addr, &pte);
+		ret = gstage_set_pte(kvm, &pcache, &map);
 		spin_unlock(&kvm->mmu_lock);
 		if (ret)
 			goto out;
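kvm_riscv_gstage_ioremap() now fills a stack-allocated map per 4K page instead of a bare pte_t, but its signature is untouched, so existing call sites are unaffected. A hypothetical invocation (all addresses made up) for reference:

/* Hypothetical usage: map a 16K read-only MMIO window for the guest. */
ret = kvm_riscv_gstage_ioremap(kvm, 0x10000000UL /* gpa */,
			       0x30000000UL /* hpa */, 0x4000 /* size */,
			       false /* writable */, false /* in_atomic */);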
@@ -593,7 +597,8 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 
 int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 			 struct kvm_memory_slot *memslot,
-			 gpa_t gpa, unsigned long hva, bool is_write)
+			 gpa_t gpa, unsigned long hva, bool is_write,
+			 struct kvm_gstage_mapping *out_map)
 {
 	int ret;
 	kvm_pfn_t hfn;
@@ -608,6 +613,9 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 	unsigned long vma_pagesize, mmu_seq;
 	struct page *page;
 
+	/* Setup initial state of output mapping */
+	memset(out_map, 0, sizeof(*out_map));
+
 	/* We need minimum second+third level pages */
 	ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
 	if (ret) {
@@ -677,10 +685,10 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 	if (writable) {
 		mark_page_dirty(kvm, gfn);
 		ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
-				      vma_pagesize, false, true);
+				      vma_pagesize, false, true, out_map);
 	} else {
 		ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
-				      vma_pagesize, true, true);
+				      vma_pagesize, true, true, out_map);
 	}
 
 	if (ret)
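After the final hunk, kvm_riscv_gstage_map() hands the established mapping back to its caller instead of returning only success or failure. A hypothetical call site (variable names assumed; this diff does not show the callers being updated) could inspect it like so:

/* Hypothetical call site: the caller can now see exactly what got mapped. */
struct kvm_gstage_mapping host_map;
int ret;

ret = kvm_riscv_gstage_map(vcpu, memslot, gpa, hva, is_write, &host_map);
if (!ret)
	pr_debug("mapped gpa 0x%llx at level %u\n",
		 (unsigned long long)host_map.addr, host_map.level);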