@@ -42,6 +42,8 @@
  */
 
 static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
+static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+		      u64 val);
 
 static bool read_from_write_only(struct kvm_vcpu *vcpu,
 				 struct sys_reg_params *params,
@@ -1503,15 +1505,35 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
+					  const struct sys_reg_desc *rd)
+{
+	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
+
+	/* Limit debug to ARMv8.0 */
+	val &= ~ID_AA64DFR0_EL1_DebugVer_MASK;
+	val |= SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DebugVer, IMP);
+
+	/*
+	 * Only initialize the PMU version if the vCPU was configured with one.
+	 */
+	val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
+	if (kvm_vcpu_has_pmu(vcpu))
+		val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
+				      kvm_arm_pmu_get_pmuver_limit());
+
+	/* Hide SPE from guests */
+	val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;
+
+	return val;
+}
+
 static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
 			       const struct sys_reg_desc *rd,
 			       u64 val)
 {
-	u8 pmuver, host_pmuver;
-	bool valid_pmu;
-
-	host_pmuver = kvm_arm_pmu_get_pmuver_limit();
-	pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);
+	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);
+	int r;
 
 	/*
 	 * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
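The SYS_FIELD_PREP()/SYS_FIELD_PREP_ENUM() helpers in the new reset hook are thin wrappers around the generic FIELD_PREP() bitfield macro, built on the generated <reg>_<field>_MASK and <reg>_<field>_<value> constants. A minimal standalone sketch of the clear-then-insert pattern the hook relies on; the mask and value below are illustrative stand-ins, not the real register layout:

#include <linux/bits.h>
#include <linux/bitfield.h>

/* Illustrative 4-bit field at bits [3:0], shaped like ID_AA64DFR0_EL1.DebugVer */
#define EXAMPLE_DebugVer_MASK	GENMASK_ULL(3, 0)
#define EXAMPLE_DebugVer_IMP	0x6	/* Armv8.0 debug architecture */

static u64 limit_debugver(u64 val)
{
	/* Clear the whole field, then insert the capped value at its offset */
	val &= ~EXAMPLE_DebugVer_MASK;
	val |= FIELD_PREP(EXAMPLE_DebugVer_MASK, EXAMPLE_DebugVer_IMP);
	return val;
}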
@@ -1532,38 +1554,33 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
 		pmuver = 0;
 	}
 
-	/*
-	 * Allow AA64DFR0_EL1.PMUver to be set from userspace as long
-	 * as it doesn't promise more than what the HW gives us.
-	 */
-	if (pmuver > host_pmuver)
-		return -EINVAL;
+	r = set_id_reg(vcpu, rd, val);
+	if (r)
+		return r;
 
-	valid_pmu = pmuver;
+	vcpu->kvm->arch.dfr0_pmuver = pmuver;
+	return 0;
+}
 
-	/* Make sure view register and PMU support do match */
-	if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
-		return -EINVAL;
+static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
+				      const struct sys_reg_desc *rd)
+{
+	u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
+	u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);
 
-	/* We can only differ with PMUver, and anything else is an error */
-	val ^= read_id_reg(vcpu, rd);
-	val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
-	if (val)
-		return -EINVAL;
+	val &= ~ID_DFR0_EL1_PerfMon_MASK;
+	if (kvm_vcpu_has_pmu(vcpu))
+		val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
 
-	vcpu->kvm->arch.dfr0_pmuver = pmuver;
-	return 0;
+	return val;
 }
 
 static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 			   const struct sys_reg_desc *rd,
 			   u64 val)
 {
-	u8 perfmon, host_perfmon;
-	bool valid_pmu;
-
-	host_perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
-	perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
+	u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
+	int r;
 
 	if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
 		val &= ~ID_DFR0_EL1_PerfMon_MASK;
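Both setters now funnel their final validation through the generic set_id_reg() forward-declared at the top of the file, leaving only the PMU-specific quirks in the per-register handlers. set_id_reg()'s body is not part of this diff, so the following is a rough sketch of the assumed shape; arm64_check_features(), IDREG() and reg_to_encoding() are taken from the surrounding series, not from this hunk:

/*
 * Sketch only: validate the user-supplied value against the limits
 * derived from the descriptor's .reset value and .val writable mask,
 * then commit it as the VM's view of the register.
 */
static int set_id_reg_sketch(struct kvm_vcpu *vcpu,
			     const struct sys_reg_desc *rd, u64 val)
{
	int ret;

	ret = arm64_check_features(vcpu, rd, val);	/* reject over-promising values */
	if (ret)
		return ret;

	IDREG(vcpu->kvm, reg_to_encoding(rd)) = val;	/* store the new view */
	return 0;
}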
@@ -1576,21 +1593,12 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 	 * AArch64 side (as everything is emulated with that), and
 	 * that this is a PMUv3.
 	 */
-	if (perfmon > host_perfmon ||
-	    (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3))
-		return -EINVAL;
-
-	valid_pmu = perfmon;
-
-	/* Make sure view register and PMU support do match */
-	if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
+	if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
 		return -EINVAL;
 
-	/* We can only differ with PerfMon, and anything else is an error */
-	val ^= read_id_reg(vcpu, rd);
-	val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
-	if (val)
-		return -EINVAL;
+	r = set_id_reg(vcpu, rd, val);
+	if (r)
+		return r;
 
 	vcpu->kvm->arch.dfr0_pmuver = perfmon_to_pmuver(perfmon);
 	return 0;
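Note that the AArch32 handler stores perfmon_to_pmuver(perfmon), keeping the AArch64 PMUVer encoding as the single source of truth behind both register views. A sketch of the assumed mapping; the helper lives elsewhere in sys_regs.c, so treat the exact cases as illustrative:

/* Sketch: translate ID_DFR0_EL1.PerfMon into ID_AA64DFR0_EL1.PMUVer */
static u8 perfmon_to_pmuver_sketch(u8 perfmon)
{
	switch (perfmon) {
	case ID_DFR0_EL1_PerfMon_PMUv3:
		return ID_AA64DFR0_EL1_PMUVer_IMP;
	case ID_DFR0_EL1_PerfMon_IMPDEF:
		return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
	default:
		/* PMUv3p1 and later use identical encodings in both views */
		return perfmon;
	}
}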
@@ -1998,9 +2006,13 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	/* CRm=1 */
 	AA32_ID_SANITISED(ID_PFR0_EL1),
 	AA32_ID_SANITISED(ID_PFR1_EL1),
-	{ SYS_DESC(SYS_ID_DFR0_EL1), .access = access_id_reg,
-	  .get_user = get_id_reg, .set_user = set_id_dfr0_el1,
-	  .visibility = aa32_id_visibility, },
+	{ SYS_DESC(SYS_ID_DFR0_EL1),
+	  .access = access_id_reg,
+	  .get_user = get_id_reg,
+	  .set_user = set_id_dfr0_el1,
+	  .visibility = aa32_id_visibility,
+	  .reset = read_sanitised_id_dfr0_el1,
+	  .val = ID_DFR0_EL1_PerfMon_MASK, },
 	ID_HIDDEN(ID_AFR0_EL1),
 	AA32_ID_SANITISED(ID_MMFR0_EL1),
 	AA32_ID_SANITISED(ID_MMFR1_EL1),
@@ -2040,8 +2052,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	ID_UNALLOCATED(4,7),
 
 	/* CRm=5 */
-	{ SYS_DESC(SYS_ID_AA64DFR0_EL1), .access = access_id_reg,
-	  .get_user = get_id_reg, .set_user = set_id_aa64dfr0_el1, },
+	{ SYS_DESC(SYS_ID_AA64DFR0_EL1),
+	  .access = access_id_reg,
+	  .get_user = get_id_reg,
+	  .set_user = set_id_aa64dfr0_el1,
+	  .reset = read_sanitised_id_aa64dfr0_el1,
+	  .val = ID_AA64DFR0_EL1_PMUVer_MASK, },
 	ID_SANITISED(ID_AA64DFR1_EL1),
 	ID_UNALLOCATED(5,2),
 	ID_UNALLOCATED(5,3),
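With .val set to the PMUVer/PerfMon masks, those become the only bits userspace may legitimately change; every other field must match the sanitised .reset value. A hedged VMM-side usage sketch (error handling trimmed; ID_AA64DFR0_EL1 encodes as op0=3, op1=0, CRn=0, CRm=5, op2=0):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* ID_AA64DFR0_EL1: op0=3, op1=0, CRn=0, CRm=5, op2=0 */
#define KVM_REG_ID_AA64DFR0_EL1 ARM64_SYS_REG(3, 0, 0, 5, 0)

/* Sketch: read the vCPU's ID_AA64DFR0_EL1, rewrite PMUVer, write it back */
static int clamp_pmuver(int vcpu_fd, __u64 pmuver_mask, __u64 new_pmuver)
{
	__u64 val;
	struct kvm_one_reg reg = {
		.id = KVM_REG_ID_AA64DFR0_EL1,
		.addr = (__u64)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
		return -1;

	val &= ~pmuver_mask;			/* clear the writable field */
	val |= new_pmuver & pmuver_mask;	/* insert the desired version */

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

A write that touches any field outside the writable mask should bounce with -EINVAL, which is the same behaviour the removed open-coded XOR checks used to implement by hand.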