Skip to content

Commit fd6f17b

Browse files
Keqian Zhu authored and Marc Zyngier committed
KVM: arm64: Remove the creation time's mapping of MMIO regions
The MMIO regions may be unmapped for many reasons and can be remapped by stage2 fault path. Map MMIO regions at creation time becomes a minor optimization and makes these two mapping path hard to sync. Remove the mapping code while keep the useful sanity check. Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com> Signed-off-by: Marc Zyngier <maz@kernel.org> Link: https://lore.kernel.org/r/20210507110322.23348-2-zhukeqian1@huawei.com
1 parent 8124c8a commit fd6f17b

1 file changed

Lines changed: 3 additions & 35 deletions

File tree

arch/arm64/kvm/mmu.c

Lines changed: 3 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -1346,7 +1346,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
13461346
{
13471347
hva_t hva = mem->userspace_addr;
13481348
hva_t reg_end = hva + mem->memory_size;
1349-
bool writable = !(mem->flags & KVM_MEM_READONLY);
13501349
int ret = 0;
13511350

13521351
if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
@@ -1363,8 +1362,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
13631362
mmap_read_lock(current->mm);
13641363
/*
13651364
* A memory region could potentially cover multiple VMAs, and any holes
1366-
* between them, so iterate over all of them to find out if we can map
1367-
* any of them right now.
1365+
* between them, so iterate over all of them.
13681366
*
13691367
* +--------------------------------------------+
13701368
* +---------------+----------------+ +----------------+
@@ -1375,51 +1373,21 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
13751373
*/
13761374
do {
13771375
struct vm_area_struct *vma;
1378-
hva_t vm_start, vm_end;
13791376

13801377
vma = find_vma_intersection(current->mm, hva, reg_end);
13811378
if (!vma)
13821379
break;
13831380

1384-
/*
1385-
* Take the intersection of this VMA with the memory region
1386-
*/
1387-
vm_start = max(hva, vma->vm_start);
1388-
vm_end = min(reg_end, vma->vm_end);
1389-
13901381
if (vma->vm_flags & VM_PFNMAP) {
1391-
gpa_t gpa = mem->guest_phys_addr +
1392-
(vm_start - mem->userspace_addr);
1393-
phys_addr_t pa;
1394-
1395-
pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
1396-
pa += vm_start - vma->vm_start;
1397-
13981382
/* IO region dirty page logging not allowed */
13991383
if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
14001384
ret = -EINVAL;
1401-
goto out;
1402-
}
1403-
1404-
ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
1405-
vm_end - vm_start,
1406-
writable);
1407-
if (ret)
14081385
break;
1386+
}
14091387
}
1410-
hva = vm_end;
1388+
hva = min(reg_end, vma->vm_end);
14111389
} while (hva < reg_end);
14121390

1413-
if (change == KVM_MR_FLAGS_ONLY)
1414-
goto out;
1415-
1416-
spin_lock(&kvm->mmu_lock);
1417-
if (ret)
1418-
unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size);
1419-
else if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
1420-
stage2_flush_memslot(kvm, memslot);
1421-
spin_unlock(&kvm->mmu_lock);
1422-
out:
14231391
mmap_read_unlock(current->mm);
14241392
return ret;
14251393
}

0 commit comments

Comments (0)