Skip to content

Commit cfbdc54

Browse files
Fuad Tabba authored and Marc Zyngier committed
KVM: arm64: Rename __tlb_switch_to_{guest,host}() in VHE
Rename __tlb_switch_to_{guest,host}() to {enter,exit}_vmid_context() in VHE code to maintain symmetry between the nVHE and VHE TLB invalidations.

No functional change intended.

Suggested-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Fuad Tabba <tabba@google.com>
Acked-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240423150538.2103045-11-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
1 parent 58f3b0f commit cfbdc54

1 file changed

Lines changed: 13 additions & 13 deletions

File tree

  • arch/arm64/kvm/hyp/vhe

arch/arm64/kvm/hyp/vhe/tlb.c

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -17,8 +17,8 @@ struct tlb_inv_context {
1717
u64 sctlr;
1818
};
1919

20-
static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
21-
struct tlb_inv_context *cxt)
20+
static void enter_vmid_context(struct kvm_s2_mmu *mmu,
21+
struct tlb_inv_context *cxt)
2222
{
2323
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
2424
u64 val;
@@ -67,7 +67,7 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
6767
isb();
6868
}
6969

70-
static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
70+
static void exit_vmid_context(struct tlb_inv_context *cxt)
7171
{
7272
/*
7373
* We're done with the TLB operation, let's restore the host's
@@ -97,7 +97,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
9797
dsb(ishst);
9898

9999
/* Switch to requested VMID */
100-
__tlb_switch_to_guest(mmu, &cxt);
100+
enter_vmid_context(mmu, &cxt);
101101

102102
/*
103103
* We could do so much better if we had the VA as well.
@@ -118,7 +118,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
118118
dsb(ish);
119119
isb();
120120

121-
__tlb_switch_to_host(&cxt);
121+
exit_vmid_context(&cxt);
122122
}
123123

124124
void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
@@ -129,7 +129,7 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
129129
dsb(nshst);
130130

131131
/* Switch to requested VMID */
132-
__tlb_switch_to_guest(mmu, &cxt);
132+
enter_vmid_context(mmu, &cxt);
133133

134134
/*
135135
* We could do so much better if we had the VA as well.
@@ -150,7 +150,7 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
150150
dsb(nsh);
151151
isb();
152152

153-
__tlb_switch_to_host(&cxt);
153+
exit_vmid_context(&cxt);
154154
}
155155

156156
void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
@@ -169,7 +169,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
169169
dsb(ishst);
170170

171171
/* Switch to requested VMID */
172-
__tlb_switch_to_guest(mmu, &cxt);
172+
enter_vmid_context(mmu, &cxt);
173173

174174
__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
175175

@@ -178,7 +178,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
178178
dsb(ish);
179179
isb();
180180

181-
__tlb_switch_to_host(&cxt);
181+
exit_vmid_context(&cxt);
182182
}
183183

184184
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
@@ -188,28 +188,28 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
188188
dsb(ishst);
189189

190190
/* Switch to requested VMID */
191-
__tlb_switch_to_guest(mmu, &cxt);
191+
enter_vmid_context(mmu, &cxt);
192192

193193
__tlbi(vmalls12e1is);
194194
dsb(ish);
195195
isb();
196196

197-
__tlb_switch_to_host(&cxt);
197+
exit_vmid_context(&cxt);
198198
}
199199

200200
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
201201
{
202202
struct tlb_inv_context cxt;
203203

204204
/* Switch to requested VMID */
205-
__tlb_switch_to_guest(mmu, &cxt);
205+
enter_vmid_context(mmu, &cxt);
206206

207207
__tlbi(vmalle1);
208208
asm volatile("ic iallu");
209209
dsb(nsh);
210210
isb();
211211

212-
__tlb_switch_to_host(&cxt);
212+
exit_vmid_context(&cxt);
213213
}
214214

215215
void __kvm_flush_vm_context(void)

0 commit comments

Comments (0)