Skip to content

Commit 607475c

Browse files
Binbin Wu authored and sean-jc committed
KVM: x86: Add helpers to query individual CR0/CR4 bits
Add helpers to check if a specific CR0/CR4 bit is set to avoid a plethora
of implicit casts from the "unsigned long" return of kvm_read_cr*_bits(),
and to make each caller's intent more obvious.

Defer converting helpers that do truly ugly casts from "unsigned long" to
"int", e.g. is_pse(), to a future commit so that their conversion is more
isolated.

Opportunistically drop the superfluous pcid_enabled from kvm_set_cr3();
the local variable is used only once, immediately after its declaration.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
Link: https://lore.kernel.org/r/20230322045824.22970-2-binbin.wu@linux.intel.com
[sean: move "obvious" conversions to this commit, massage changelog]
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 0c928ff commit 607475c

8 files changed

Lines changed: 35 additions & 21 deletions

File tree

arch/x86/kvm/cpuid.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -266,7 +266,7 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
266266
/* Update OSXSAVE bit */
267267
if (boot_cpu_has(X86_FEATURE_XSAVE))
268268
cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
269-
kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));
269+
kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE));
270270

271271
cpuid_entry_change(best, X86_FEATURE_APIC,
272272
vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
@@ -275,7 +275,7 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
275275
best = cpuid_entry2_find(entries, nent, 7, 0);
276276
if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
277277
cpuid_entry_change(best, X86_FEATURE_OSPKE,
278-
kvm_read_cr4_bits(vcpu, X86_CR4_PKE));
278+
kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));
279279

280280
best = cpuid_entry2_find(entries, nent, 0xD, 0);
281281
if (best)

arch/x86/kvm/kvm_cache_regs.h

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -157,6 +157,14 @@ static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
157157
return vcpu->arch.cr0 & mask;
158158
}
159159

160+
static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu,
161+
unsigned long cr0_bit)
162+
{
163+
BUILD_BUG_ON(!is_power_of_2(cr0_bit));
164+
165+
return !!kvm_read_cr0_bits(vcpu, cr0_bit);
166+
}
167+
160168
static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
161169
{
162170
return kvm_read_cr0_bits(vcpu, ~0UL);
@@ -171,6 +179,14 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
171179
return vcpu->arch.cr4 & mask;
172180
}
173181

182+
static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu,
183+
unsigned long cr4_bit)
184+
{
185+
BUILD_BUG_ON(!is_power_of_2(cr4_bit));
186+
187+
return !!kvm_read_cr4_bits(vcpu, cr4_bit);
188+
}
189+
174190
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
175191
{
176192
if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))

arch/x86/kvm/mmu.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,7 @@ static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
132132
{
133133
BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);
134134

135-
return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
135+
return kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)
136136
? cr3 & X86_CR3_PCID_MASK
137137
: 0;
138138
}

arch/x86/kvm/pmu.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -540,9 +540,9 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
540540
if (!pmc)
541541
return 1;
542542

543-
if (!(kvm_read_cr4_bits(vcpu, X86_CR4_PCE)) &&
543+
if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) &&
544544
(static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
545-
(kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
545+
kvm_is_cr0_bit_set(vcpu, X86_CR0_PE))
546546
return 1;
547547

548548
*data = pmc_read_counter(pmc) & mask;

arch/x86/kvm/vmx/nested.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5154,7 +5154,7 @@ static int handle_vmxon(struct kvm_vcpu *vcpu)
51545154
* does force CR0.PE=1, but only to also force VM86 in order to emulate
51555155
* Real Mode, and so there's no need to check CR0.PE manually.
51565156
*/
5157-
if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
5157+
if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_VMXE)) {
51585158
kvm_queue_exception(vcpu, UD_VECTOR);
51595159
return 1;
51605160
}

arch/x86/kvm/vmx/vmx.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5180,7 +5180,7 @@ bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
51805180
if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
51815181
return true;
51825182

5183-
return vmx_get_cpl(vcpu) == 3 && kvm_read_cr0_bits(vcpu, X86_CR0_AM) &&
5183+
return vmx_get_cpl(vcpu) == 3 && kvm_is_cr0_bit_set(vcpu, X86_CR0_AM) &&
51845184
(kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
51855185
}
51865186

arch/x86/kvm/x86.c

Lines changed: 10 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -841,7 +841,7 @@ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
841841

842842
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
843843
{
844-
if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
844+
if ((dr != 4 && dr != 5) || !kvm_is_cr4_bit_set(vcpu, X86_CR4_DE))
845845
return true;
846846

847847
kvm_queue_exception(vcpu, UD_VECTOR);
@@ -983,7 +983,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
983983
return 1;
984984

985985
if (!(cr0 & X86_CR0_PG) &&
986-
(is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)))
986+
(is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)))
987987
return 1;
988988

989989
static_call(kvm_x86_set_cr0)(vcpu, cr0);
@@ -1005,7 +1005,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
10051005
if (vcpu->arch.guest_state_protected)
10061006
return;
10071007

1008-
if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
1008+
if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
10091009

10101010
if (vcpu->arch.xcr0 != host_xcr0)
10111011
xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
@@ -1019,7 +1019,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
10191019
if (static_cpu_has(X86_FEATURE_PKU) &&
10201020
vcpu->arch.pkru != vcpu->arch.host_pkru &&
10211021
((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
1022-
kvm_read_cr4_bits(vcpu, X86_CR4_PKE)))
1022+
kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
10231023
write_pkru(vcpu->arch.pkru);
10241024
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
10251025
}
@@ -1033,14 +1033,14 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
10331033
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
10341034
if (static_cpu_has(X86_FEATURE_PKU) &&
10351035
((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
1036-
kvm_read_cr4_bits(vcpu, X86_CR4_PKE))) {
1036+
kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) {
10371037
vcpu->arch.pkru = rdpkru();
10381038
if (vcpu->arch.pkru != vcpu->arch.host_pkru)
10391039
write_pkru(vcpu->arch.host_pkru);
10401040
}
10411041
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
10421042

1043-
if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
1043+
if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
10441044

10451045
if (vcpu->arch.xcr0 != host_xcr0)
10461046
xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
@@ -1245,7 +1245,7 @@ static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
12451245
* PCIDs for them are also 0, because MOV to CR3 always flushes the TLB
12461246
* with PCIDE=0.
12471247
*/
1248-
if (!kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
1248+
if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE))
12491249
return;
12501250

12511251
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
@@ -1260,9 +1260,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
12601260
bool skip_tlb_flush = false;
12611261
unsigned long pcid = 0;
12621262
#ifdef CONFIG_X86_64
1263-
bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
1264-
1265-
if (pcid_enabled) {
1263+
if (kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)) {
12661264
skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
12671265
cr3 &= ~X86_CR3_PCID_NOFLUSH;
12681266
pcid = cr3 & X86_CR3_PCID_MASK;
@@ -5051,7 +5049,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
50515049
return 0;
50525050
if (mce->status & MCI_STATUS_UC) {
50535051
if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
5054-
!kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
5052+
!kvm_is_cr4_bit_set(vcpu, X86_CR4_MCE)) {
50555053
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
50565054
return 0;
50575055
}
@@ -13254,7 +13252,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
1325413252
return 1;
1325513253
}
1325613254

13257-
pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
13255+
pcid_enabled = kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE);
1325813256

1325913257
switch (type) {
1326013258
case INVPCID_TYPE_INDIV_ADDR:

arch/x86/kvm/x86.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -123,7 +123,7 @@ static inline bool kvm_exception_is_soft(unsigned int nr)
123123

124124
static inline bool is_protmode(struct kvm_vcpu *vcpu)
125125
{
126-
return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
126+
return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);
127127
}
128128

129129
static inline int is_long_mode(struct kvm_vcpu *vcpu)
@@ -193,7 +193,7 @@ static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
193193

194194
static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
195195
{
196-
return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
196+
return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
197197
}
198198

199199
static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)

0 commit comments

Comments (0)