@@ -343,8 +343,9 @@ static void gstage_wp_memory_region(struct kvm *kvm, int slot)
 	kvm_flush_remote_tlbs(kvm);
 }
 
-static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
-			  unsigned long size, bool writable)
+int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
+			     phys_addr_t hpa, unsigned long size,
+			     bool writable, bool in_atomic)
 {
 	pte_t pte;
 	int ret = 0;
@@ -353,13 +354,14 @@ static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
 	struct kvm_mmu_memory_cache pcache;
 
 	memset(&pcache, 0, sizeof(pcache));
+	pcache.gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0;
 	pcache.gfp_zero = __GFP_ZERO;
 
 	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
 	pfn = __phys_to_pfn(hpa);
 
 	for (addr = gpa; addr < end; addr += PAGE_SIZE) {
-		pte = pfn_pte(pfn, PAGE_KERNEL);
+		pte = pfn_pte(pfn, PAGE_KERNEL_IO);
 
 		if (!writable)
 			pte = pte_wrprotect(pte);
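The in_atomic case relies on the generic MMU memory cache honoring a caller-supplied GFP mask. A minimal sketch of how kvm_mmu_topup_memory_cache() might consult the new gfp_custom field, assuming the companion generic-KVM change that introduces it (that hunk in virt/kvm/kvm_main.c is not part of this diff):

/*
 * Sketch only: assumes a companion patch adds gfp_custom to
 * struct kvm_mmu_memory_cache. With GFP_ATOMIC | __GFP_ACCOUNT,
 * the cache can be topped up without sleeping.
 */
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
	gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;
	void *obj;

	if (mc->nobjs >= min)
		return 0;
	while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
		obj = mmu_memory_cache_alloc_obj(mc, gfp);
		if (!obj)
			return mc->nobjs >= min ? 0 : -ENOMEM;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}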
@@ -382,6 +384,13 @@ static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
 	return ret;
 }
 
+void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
+{
+	spin_lock(&kvm->mmu_lock);
+	gstage_unmap_range(kvm, gpa, size, false);
+	spin_unlock(&kvm->mmu_lock);
+}
+
 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 					     struct kvm_memory_slot *slot,
 					     gfn_t gfn_offset,
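Taken together, the exported pair lets other KVM RISC-V code map device MMIO into the guest physical address space and tear the mapping down later, including from atomic context. A hypothetical caller (the function names below are illustrative, not part of this patch):

/*
 * Hypothetical usage sketch: a caller that must establish a G-stage
 * mapping while holding a spinlock passes in_atomic = true so the
 * page-table cache is topped up with GFP_ATOMIC instead of a
 * sleeping allocation.
 */
static int example_map_device_page(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa)
{
	/* May run with a lock held, hence in_atomic = true. */
	return kvm_riscv_gstage_ioremap(kvm, gpa, hpa, PAGE_SIZE,
					true /* writable */,
					true /* in_atomic */);
}

static void example_unmap_device_page(struct kvm *kvm, gpa_t gpa)
{
	kvm_riscv_gstage_iounmap(kvm, gpa, PAGE_SIZE);
}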
@@ -517,8 +526,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				goto out;
 			}
 
-			ret = gstage_ioremap(kvm, gpa, pa,
-					     vm_end - vm_start, writable);
+			ret = kvm_riscv_gstage_ioremap(kvm, gpa, pa,
+						       vm_end - vm_start,
+						       writable, false);
 			if (ret)
 				break;
 		}
@@ -611,7 +621,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 {
 	int ret;
 	kvm_pfn_t hfn;
-	bool writeable;
+	bool writable;
 	short vma_pageshift;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct vm_area_struct *vma;
@@ -659,7 +669,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 
 	mmu_seq = kvm->mmu_notifier_seq;
 
-	hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writeable);
+	hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
 	if (hfn == KVM_PFN_ERR_HWPOISON) {
 		send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
 				vma_pageshift, current);
@@ -673,14 +683,14 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
673683 * for write faults.
674684 */
675685 if (logging && !is_write )
676- writeable = false;
686+ writable = false;
677687
678688 spin_lock (& kvm -> mmu_lock );
679689
680690 if (mmu_notifier_retry (kvm , mmu_seq ))
681691 goto out_unlock ;
682692
683- if (writeable ) {
693+ if (writable ) {
684694 kvm_set_pfn_dirty (hfn );
685695 mark_page_dirty (kvm , gfn );
686696 ret = gstage_map_page (kvm , pcache , gpa , hfn << PAGE_SHIFT ,