2525#define IA32_MTRR_DEF_TYPE_FE (1ULL << 10)
2626#define IA32_MTRR_DEF_TYPE_TYPE_MASK (0xff)
2727
/*
 * Variable MTRR MSRs come in base/mask pairs: the base MSR of each pair
 * carries an even MSR number, the mask MSR the following odd number.
 */
static bool is_mtrr_base_msr(unsigned int msr)
{
	return (msr % 2) == 0;
}
33+
34+ static struct kvm_mtrr_range * var_mtrr_msr_to_range (struct kvm_vcpu * vcpu ,
35+ unsigned int msr )
36+ {
37+ int index = (msr - MTRRphysBase_MSR (0 )) / 2 ;
38+
39+ return & vcpu -> arch .mtrr_state .var_ranges [index ];
40+ }
41+
2842static bool msr_mtrr_valid (unsigned msr )
2943{
3044 switch (msr ) {
31- case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1 :
45+ case MTRRphysBase_MSR ( 0 ) ... MTRRphysMask_MSR ( KVM_NR_VAR_MTRR - 1 ) :
3246 case MSR_MTRRfix64K_00000 :
3347 case MSR_MTRRfix16K_80000 :
3448 case MSR_MTRRfix16K_A0000 :
@@ -41,7 +55,6 @@ static bool msr_mtrr_valid(unsigned msr)
4155 case MSR_MTRRfix4K_F0000 :
4256 case MSR_MTRRfix4K_F8000 :
4357 case MSR_MTRRdefType :
44- case MSR_IA32_CR_PAT :
4558 return true;
4659 }
4760 return false;
@@ -52,17 +65,15 @@ static bool valid_mtrr_type(unsigned t)
5265 return t < 8 && (1 << t ) & 0x73 ; /* 0, 1, 4, 5, 6 */
5366}
5467
55- bool kvm_mtrr_valid (struct kvm_vcpu * vcpu , u32 msr , u64 data )
68+ static bool kvm_mtrr_valid (struct kvm_vcpu * vcpu , u32 msr , u64 data )
5669{
5770 int i ;
5871 u64 mask ;
5972
6073 if (!msr_mtrr_valid (msr ))
6174 return false;
6275
63- if (msr == MSR_IA32_CR_PAT ) {
64- return kvm_pat_valid (data );
65- } else if (msr == MSR_MTRRdefType ) {
76+ if (msr == MSR_MTRRdefType ) {
6677 if (data & ~0xcff )
6778 return false;
6879 return valid_mtrr_type (data & 0xff );
@@ -74,7 +85,8 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
7485 }
7586
7687 /* variable MTRRs */
77- WARN_ON (!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR ));
88+ WARN_ON (!(msr >= MTRRphysBase_MSR (0 ) &&
89+ msr <= MTRRphysMask_MSR (KVM_NR_VAR_MTRR - 1 )));
7890
7991 mask = kvm_vcpu_reserved_gpa_bits_raw (vcpu );
8092 if ((msr & 1 ) == 0 ) {
@@ -88,7 +100,6 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
88100
89101 return (data & mask ) == 0 ;
90102}
91- EXPORT_SYMBOL_GPL (kvm_mtrr_valid );
92103
93104static bool mtrr_is_enabled (struct kvm_mtrr * mtrr_state )
94105{
@@ -308,10 +319,8 @@ static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
308319{
309320 struct kvm_mtrr * mtrr_state = & vcpu -> arch .mtrr_state ;
310321 gfn_t start , end ;
311- int index ;
312322
313- if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
314- !kvm_arch_has_noncoherent_dma (vcpu -> kvm ))
323+ if (!tdp_enabled || !kvm_arch_has_noncoherent_dma (vcpu -> kvm ))
315324 return ;
316325
317326 if (!mtrr_is_enabled (mtrr_state ) && msr != MSR_MTRRdefType )
@@ -326,8 +335,7 @@ static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
326335 end = ~0ULL ;
327336 } else {
328337 /* variable range MTRRs. */
329- index = (msr - 0x200 ) / 2 ;
330- var_mtrr_range (& mtrr_state -> var_ranges [index ], & start , & end );
338+ var_mtrr_range (var_mtrr_msr_to_range (vcpu , msr ), & start , & end );
331339 }
332340
333341 kvm_zap_gfn_range (vcpu -> kvm , gpa_to_gfn (start ), gpa_to_gfn (end ));
@@ -342,21 +350,18 @@ static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
342350{
343351 struct kvm_mtrr * mtrr_state = & vcpu -> arch .mtrr_state ;
344352 struct kvm_mtrr_range * tmp , * cur ;
345- int index , is_mtrr_mask ;
346353
347- index = (msr - 0x200 ) / 2 ;
348- is_mtrr_mask = msr - 0x200 - 2 * index ;
349- cur = & mtrr_state -> var_ranges [index ];
354+ cur = var_mtrr_msr_to_range (vcpu , msr );
350355
351356 /* remove the entry if it's in the list. */
352357 if (var_mtrr_range_is_valid (cur ))
353- list_del (& mtrr_state -> var_ranges [ index ]. node );
358+ list_del (& cur -> node );
354359
355360 /*
356361 * Set all illegal GPA bits in the mask, since those bits must
357362 * implicitly be 0. The bits are then cleared when reading them.
358363 */
359- if (! is_mtrr_mask )
364+ if (is_mtrr_base_msr ( msr ) )
360365 cur -> base = data ;
361366 else
362367 cur -> mask = data | kvm_vcpu_reserved_gpa_bits_raw (vcpu );
@@ -382,8 +387,6 @@ int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
382387 * (u64 * )& vcpu -> arch .mtrr_state .fixed_ranges [index ] = data ;
383388 else if (msr == MSR_MTRRdefType )
384389 vcpu -> arch .mtrr_state .deftype = data ;
385- else if (msr == MSR_IA32_CR_PAT )
386- vcpu -> arch .pat = data ;
387390 else
388391 set_var_mtrr_msr (vcpu , msr , data );
389392
@@ -411,21 +414,16 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
411414 return 1 ;
412415
413416 index = fixed_msr_to_range_index (msr );
414- if (index >= 0 )
417+ if (index >= 0 ) {
415418 * pdata = * (u64 * )& vcpu -> arch .mtrr_state .fixed_ranges [index ];
416- else if (msr == MSR_MTRRdefType )
419+ } else if (msr == MSR_MTRRdefType ) {
417420 * pdata = vcpu -> arch .mtrr_state .deftype ;
418- else if (msr == MSR_IA32_CR_PAT )
419- * pdata = vcpu -> arch .pat ;
420- else { /* Variable MTRRs */
421- int is_mtrr_mask ;
422-
423- index = (msr - 0x200 ) / 2 ;
424- is_mtrr_mask = msr - 0x200 - 2 * index ;
425- if (!is_mtrr_mask )
426- * pdata = vcpu -> arch .mtrr_state .var_ranges [index ].base ;
421+ } else {
422+ /* Variable MTRRs */
423+ if (is_mtrr_base_msr (msr ))
424+ * pdata = var_mtrr_msr_to_range (vcpu , msr )-> base ;
427425 else
428- * pdata = vcpu -> arch . mtrr_state . var_ranges [ index ]. mask ;
426+ * pdata = var_mtrr_msr_to_range ( vcpu , msr ) -> mask ;
429427
430428 * pdata &= ~kvm_vcpu_reserved_gpa_bits_raw (vcpu );
431429 }
0 commit comments