Skip to content

Commit 9c30fc6

Browse files
Fuad Tabba authored and Marc Zyngier committed
KVM: arm64: Move setting the page as dirty out of the critical section
Move the unlock earlier in user_mem_abort() to shorten the critical section. This also helps for future refactoring and reuse of similar code. This moves out marking the page as dirty outside of the critical section. That code does not interact with the stage-2 page tables, which the read lock in the critical section protects. Signed-off-by: Fuad Tabba <tabba@google.com> Acked-by: Oliver Upton <oliver.upton@linux.dev> Link: https://lore.kernel.org/r/20240423150538.2103045-16-tabba@google.com Signed-off-by: Marc Zyngier <maz@kernel.org>
1 parent cc81b6d commit 9c30fc6

1 file changed

Lines changed: 5 additions & 3 deletions

File tree

arch/arm64/kvm/mmu.c

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1522,8 +1522,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
15221522

15231523
read_lock(&kvm->mmu_lock);
15241524
pgt = vcpu->arch.hw_mmu->pgt;
1525-
if (mmu_invalidate_retry(kvm, mmu_seq))
1525+
if (mmu_invalidate_retry(kvm, mmu_seq)) {
1526+
ret = -EAGAIN;
15261527
goto out_unlock;
1528+
}
15271529

15281530
/*
15291531
* If we are not forced to use page mapping, check if we are
@@ -1581,15 +1583,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
15811583
memcache,
15821584
KVM_PGTABLE_WALK_HANDLE_FAULT |
15831585
KVM_PGTABLE_WALK_SHARED);
1586+
out_unlock:
1587+
read_unlock(&kvm->mmu_lock);
15841588

15851589
/* Mark the page dirty only if the fault is handled successfully */
15861590
if (writable && !ret) {
15871591
kvm_set_pfn_dirty(pfn);
15881592
mark_page_dirty_in_slot(kvm, memslot, gfn);
15891593
}
15901594

1591-
out_unlock:
1592-
read_unlock(&kvm->mmu_lock);
15931595
kvm_release_pfn_clean(pfn);
15941596
return ret != -EAGAIN ? ret : 0;
15951597
}

0 commit comments

Comments (0)