Skip to content

Commit 3005f6f

Browse files
Ricardo Koller authored and Oliver Upton committed
KVM: arm64: Open-code kvm_mmu_write_protect_pt_masked()
Move the functionality of kvm_mmu_write_protect_pt_masked() into its caller, kvm_arch_mmu_enable_log_dirty_pt_masked(). This will be used in a subsequent commit in order to share some of the code in kvm_arch_mmu_enable_log_dirty_pt_masked(). Signed-off-by: Ricardo Koller <ricarkol@google.com> Reviewed-by: Gavin Shan <gshan@redhat.com> Link: https://lore.kernel.org/r/20230426172330.1439644-11-ricarkol@google.com Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
1 parent e7bf7a4 commit 3005f6f

1 file changed

Lines changed: 15 additions & 27 deletions

File tree

arch/arm64/kvm/mmu.c

Lines changed: 15 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -1078,28 +1078,6 @@ static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
10781078
kvm_flush_remote_tlbs(kvm);
10791079
}
10801080

1081-
/**
1082-
* kvm_mmu_write_protect_pt_masked() - write protect dirty pages
1083-
* @kvm: The KVM pointer
1084-
* @slot: The memory slot associated with mask
1085-
* @gfn_offset: The gfn offset in memory slot
1086-
* @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
1087-
* slot to be write protected
1088-
*
1089-
* Walks bits set in mask write protects the associated pte's. Caller must
1090-
* acquire kvm_mmu_lock.
1091-
*/
1092-
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1093-
struct kvm_memory_slot *slot,
1094-
gfn_t gfn_offset, unsigned long mask)
1095-
{
1096-
phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
1097-
phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
1098-
phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
1099-
1100-
stage2_wp_range(&kvm->arch.mmu, start, end);
1101-
}
1102-
11031081
/**
11041082
* kvm_mmu_split_memory_region() - split the stage 2 blocks into PAGE_SIZE
11051083
* pages for memory slot
@@ -1129,17 +1107,27 @@ static void kvm_mmu_split_memory_region(struct kvm *kvm, int slot)
11291107
}
11301108

11311109
/*
1132-
* kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1133-
* dirty pages.
1110+
* kvm_arch_mmu_enable_log_dirty_pt_masked() - enable dirty logging for selected pages.
1111+
* @kvm: The KVM pointer
1112+
* @slot: The memory slot associated with mask
1113+
* @gfn_offset: The gfn offset in memory slot
1114+
* @mask: The mask of pages at offset 'gfn_offset' in this memory
1115+
* slot to enable dirty logging on
11341116
*
1135-
* It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1136-
* enable dirty logging for them.
1117+
* Writes protect selected pages to enable dirty logging for them. Caller must
1118+
* acquire kvm->mmu_lock.
11371119
*/
11381120
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
11391121
struct kvm_memory_slot *slot,
11401122
gfn_t gfn_offset, unsigned long mask)
11411123
{
1142-
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1124+
phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
1125+
phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
1126+
phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
1127+
1128+
lockdep_assert_held_write(&kvm->mmu_lock);
1129+
1130+
stage2_wp_range(&kvm->arch.mmu, start, end);
11431131
}
11441132

11451133
static void kvm_send_hwpoison_signal(unsigned long address, short lsb)

0 commit comments

Comments
 (0)