@@ -1195,14 +1195,6 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
 	return true;
 }
 
-static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
-{
-	if (kvm_vcpu_has_pmu(vcpu))
-		return vcpu->kvm->arch.dfr0_pmuver;
-
-	return 0;
-}
-
 static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
 				    s64 new, s64 cur)
 {
@@ -1288,19 +1280,6 @@ static int arm64_check_features(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-static u8 perfmon_to_pmuver(u8 perfmon)
-{
-	switch (perfmon) {
-	case ID_DFR0_EL1_PerfMon_PMUv3:
-		return ID_AA64DFR0_EL1_PMUVer_IMP;
-	case ID_DFR0_EL1_PerfMon_IMPDEF:
-		return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
-	default:
-		/* Anything ARMv8.1+ and NI have the same value. For now. */
-		return perfmon;
-	}
-}
-
 static u8 pmuver_to_perfmon(u8 pmuver)
 {
 	switch (pmuver) {
@@ -1327,19 +1306,6 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
 	val = read_sanitised_ftr_reg(id);
 
 	switch (id) {
-	case SYS_ID_AA64PFR0_EL1:
-		if (!vcpu_has_sve(vcpu))
-			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
-		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
-		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
-		if (kvm_vgic_global_state.type == VGIC_V3) {
-			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
-			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
-		}
-		break;
 	case SYS_ID_AA64PFR1_EL1:
 		if (!kvm_has_mte(vcpu->kvm))
 			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
@@ -1360,22 +1326,6 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
 		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
 			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
 		break;
-	case SYS_ID_AA64DFR0_EL1:
-		/* Limit debug to ARMv8.0 */
-		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
-		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
-		/* Set PMUver to the required version */
-		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
-		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
-				  vcpu_pmuver(vcpu));
-		/* Hide SPE from guests */
-		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
-		break;
-	case SYS_ID_DFR0_EL1:
-		val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
-		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon),
-				  pmuver_to_perfmon(vcpu_pmuver(vcpu)));
-		break;
 	case SYS_ID_AA64MMFR2_EL1:
 		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
 		break;
@@ -1505,26 +1455,6 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
 	return val;
 }
 
-static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
-			       const struct sys_reg_desc *rd,
-			       u64 val)
-{
-	u8 csv2, csv3;
-	int r;
-
-	csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT);
-	csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT);
-
-	r = set_id_reg(vcpu, rd, val);
-	if (r)
-		return r;
-
-	vcpu->kvm->arch.pfr0_csv2 = csv2;
-	vcpu->kvm->arch.pfr0_csv3 = csv3;
-
-	return 0;
-}
-
 static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
 					  const struct sys_reg_desc *rd)
 {
@@ -1553,7 +1483,6 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
 			       u64 val)
 {
 	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);
-	int r;
 
 	/*
 	 * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
@@ -1569,17 +1498,10 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
 	 * surprising than an ill-guided PMU driver poking at impdef system
 	 * registers that end in an UNDEF...
 	 */
-	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) {
+	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
 		val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
-		pmuver = 0;
-	}
 
-	r = set_id_reg(vcpu, rd, val);
-	if (r)
-		return r;
-
-	vcpu->kvm->arch.dfr0_pmuver = pmuver;
-	return 0;
+	return set_id_reg(vcpu, rd, val);
 }
 
 static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
@@ -1600,7 +1522,6 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 			  u64 val)
 {
 	u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
-	int r;
 
 	if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
 		val &= ~ID_DFR0_EL1_PerfMon_MASK;
@@ -1616,12 +1537,7 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 	if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
 		return -EINVAL;
 
-	r = set_id_reg(vcpu, rd, val);
-	if (r)
-		return r;
-
-	vcpu->kvm->arch.dfr0_pmuver = perfmon_to_pmuver(perfmon);
-	return 0;
+	return set_id_reg(vcpu, rd, val);
 }
 
 /*
@@ -2076,7 +1992,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
 	  .access = access_id_reg,
 	  .get_user = get_id_reg,
-	  .set_user = set_id_aa64pfr0_el1,
+	  .set_user = set_id_reg,
 	  .reset = read_sanitised_id_aa64pfr0_el1,
 	  .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, },
 	ID_SANITISED(ID_AA64PFR1_EL1),
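
For context, a minimal sketch (not part of this diff) of the pattern the commit moves to: with the per-VM dfr0_pmuver field removed, a consumer would derive the guest-visible PMU version on demand from the stored, sanitised ID_AA64DFR0_EL1 value rather than from a dedicated kvm->arch field. The IDREG() accessor below is an assumption about the surrounding ID-register series, not something this diff introduces.

/*
 * Hedged sketch, not taken from this diff: assumes the per-VM
 * sanitised ID_AA64DFR0_EL1 value is reachable through an
 * IDREG()-style accessor, so the PMU version no longer needs
 * its own kvm->arch field.
 */
static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
{
	u64 dfr0 = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);

	return SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0);
}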