Skip to content

Commit def8c22

Browse files
murzinv authored and willdeacon committed
arm64: Add support of PAuth QARMA3 architected algorithm
QARMA3 is a relaxed version of the QARMA5 algorithm which is expected to reduce the latency of calculation while still delivering a suitable level of security. Support for QARMA3 can be discovered via ID_AA64ISAR2_EL1: APA3, bits [15:12], indicates whether the QARMA3 algorithm is implemented in the PE for address authentication in AArch64 state; GPA3, bits [11:8], indicates whether the QARMA3 algorithm is implemented in the PE for generic code authentication in AArch64 state. Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com> Acked-by: Marc Zyngier <maz@kernel.org> Link: https://lore.kernel.org/r/20220224124952.119612-4-vladimir.murzin@arm.com Signed-off-by: Will Deacon <will@kernel.org>
1 parent be3256a commit def8c22

11 files changed

Lines changed: 97 additions & 4 deletions

File tree

arch/arm64/include/asm/asm_pointer_auth.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,9 @@ alternative_else_nop_endif
6060
.macro __ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
6161
mrs \tmp1, id_aa64isar1_el1
6262
ubfx \tmp1, \tmp1, #ID_AA64ISAR1_APA_SHIFT, #8
63+
mrs_s \tmp2, SYS_ID_AA64ISAR2_EL1
64+
ubfx \tmp2, \tmp2, #ID_AA64ISAR2_APA3_SHIFT, #4
65+
orr \tmp1, \tmp1, \tmp2
6366
cbz \tmp1, .Lno_addr_auth\@
6467
mov_q \tmp1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
6568
SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)

arch/arm64/include/asm/cpufeature.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -854,6 +854,7 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
854854
extern struct arm64_ftr_override id_aa64mmfr1_override;
855855
extern struct arm64_ftr_override id_aa64pfr1_override;
856856
extern struct arm64_ftr_override id_aa64isar1_override;
857+
extern struct arm64_ftr_override id_aa64isar2_override;
857858

858859
u32 get_kvm_ipa_limit(void);
859860
void dump_cpu_features(void);

arch/arm64/include/asm/kvm_hyp.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -118,6 +118,7 @@ extern u64 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val);
118118
extern u64 kvm_nvhe_sym(id_aa64pfr1_el1_sys_val);
119119
extern u64 kvm_nvhe_sym(id_aa64isar0_el1_sys_val);
120120
extern u64 kvm_nvhe_sym(id_aa64isar1_el1_sys_val);
121+
extern u64 kvm_nvhe_sym(id_aa64isar2_el1_sys_val);
121122
extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
122123
extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
123124
extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val);

arch/arm64/include/asm/sysreg.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -773,6 +773,8 @@
773773
#define ID_AA64ISAR1_GPI_IMP_DEF 0x1
774774

775775
/* id_aa64isar2 */
776+
#define ID_AA64ISAR2_APA3_SHIFT 12
777+
#define ID_AA64ISAR2_GPA3_SHIFT 8
776778
#define ID_AA64ISAR2_RPRES_SHIFT 4
777779
#define ID_AA64ISAR2_WFXT_SHIFT 0
778780

@@ -786,6 +788,16 @@
786788
#define ID_AA64ISAR2_WFXT_NI 0x0
787789
#define ID_AA64ISAR2_WFXT_SUPPORTED 0x2
788790

791+
#define ID_AA64ISAR2_APA3_NI 0x0
792+
#define ID_AA64ISAR2_APA3_ARCHITECTED 0x1
793+
#define ID_AA64ISAR2_APA3_ARCH_EPAC 0x2
794+
#define ID_AA64ISAR2_APA3_ARCH_EPAC2 0x3
795+
#define ID_AA64ISAR2_APA3_ARCH_EPAC2_FPAC 0x4
796+
#define ID_AA64ISAR2_APA3_ARCH_EPAC2_FPAC_CMB 0x5
797+
798+
#define ID_AA64ISAR2_GPA3_NI 0x0
799+
#define ID_AA64ISAR2_GPA3_ARCHITECTED 0x1
800+
789801
/* id_aa64pfr0 */
790802
#define ID_AA64PFR0_CSV3_SHIFT 60
791803
#define ID_AA64PFR0_CSV2_SHIFT 56

arch/arm64/kernel/cpufeature.c

Lines changed: 39 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -226,6 +226,10 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
226226
};
227227

228228
static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
229+
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
230+
FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_APA3_SHIFT, 4, 0),
231+
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
232+
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_GPA3_SHIFT, 4, 0),
229233
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0),
230234
ARM64_FTR_END,
231235
};
@@ -596,6 +600,7 @@ static const struct arm64_ftr_bits ftr_raz[] = {
596600
struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override;
597601
struct arm64_ftr_override __ro_after_init id_aa64pfr1_override;
598602
struct arm64_ftr_override __ro_after_init id_aa64isar1_override;
603+
struct arm64_ftr_override __ro_after_init id_aa64isar2_override;
599604

600605
static const struct __ftr_reg_entry {
601606
u32 sys_id;
@@ -644,6 +649,8 @@ static const struct __ftr_reg_entry {
644649
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
645650
&id_aa64isar1_override),
646651
ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),
652+
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2,
653+
&id_aa64isar2_override),
647654

648655
/* Op1 = 0, CRn = 0, CRm = 7 */
649656
ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
@@ -1837,17 +1844,19 @@ static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry,
18371844
{
18381845
bool api = has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope);
18391846
bool apa = has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5], scope);
1847+
bool apa3 = has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3], scope);
18401848

1841-
return apa || api;
1849+
return apa || apa3 || api;
18421850
}
18431851

18441852
static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
18451853
int __unused)
18461854
{
18471855
bool gpi = __system_matches_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF);
18481856
bool gpa = __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH_QARMA5);
1857+
bool gpa3 = __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH_QARMA3);
18491858

1850-
return gpa || gpi;
1859+
return gpa || gpa3 || gpi;
18511860
}
18521861
#endif /* CONFIG_ARM64_PTR_AUTH */
18531862

@@ -2243,6 +2252,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
22432252
.min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
22442253
.matches = has_address_auth_cpucap,
22452254
},
2255+
{
2256+
.desc = "Address authentication (architected QARMA3 algorithm)",
2257+
.capability = ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3,
2258+
.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2259+
.sys_reg = SYS_ID_AA64ISAR2_EL1,
2260+
.sign = FTR_UNSIGNED,
2261+
.field_pos = ID_AA64ISAR2_APA3_SHIFT,
2262+
.min_field_value = ID_AA64ISAR2_APA3_ARCHITECTED,
2263+
.matches = has_address_auth_cpucap,
2264+
},
22462265
{
22472266
.desc = "Address authentication (IMP DEF algorithm)",
22482267
.capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
@@ -2268,6 +2287,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
22682287
.min_field_value = ID_AA64ISAR1_GPA_ARCHITECTED,
22692288
.matches = has_cpuid_feature,
22702289
},
2290+
{
2291+
.desc = "Generic authentication (architected QARMA3 algorithm)",
2292+
.capability = ARM64_HAS_GENERIC_AUTH_ARCH_QARMA3,
2293+
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2294+
.sys_reg = SYS_ID_AA64ISAR2_EL1,
2295+
.sign = FTR_UNSIGNED,
2296+
.field_pos = ID_AA64ISAR2_GPA3_SHIFT,
2297+
.min_field_value = ID_AA64ISAR2_GPA3_ARCHITECTED,
2298+
.matches = has_cpuid_feature,
2299+
},
22712300
{
22722301
.desc = "Generic authentication (IMP DEF algorithm)",
22732302
.capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF,
@@ -2415,6 +2444,10 @@ static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
24152444
HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_APA_SHIFT,
24162445
FTR_UNSIGNED, ID_AA64ISAR1_APA_ARCHITECTED)
24172446
},
2447+
{
2448+
HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_APA3_SHIFT,
2449+
FTR_UNSIGNED, ID_AA64ISAR2_APA3_ARCHITECTED)
2450+
},
24182451
{
24192452
HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_SHIFT,
24202453
FTR_UNSIGNED, ID_AA64ISAR1_API_IMP_DEF)
@@ -2427,6 +2460,10 @@ static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
24272460
HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPA_SHIFT,
24282461
FTR_UNSIGNED, ID_AA64ISAR1_GPA_ARCHITECTED)
24292462
},
2463+
{
2464+
HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_GPA3_SHIFT,
2465+
FTR_UNSIGNED, ID_AA64ISAR2_GPA3_ARCHITECTED)
2466+
},
24302467
{
24312468
HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPI_SHIFT,
24322469
FTR_UNSIGNED, ID_AA64ISAR1_GPI_IMP_DEF)

arch/arm64/kernel/idreg-override.c

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
#define FTR_DESC_NAME_LEN 20
1818
#define FTR_DESC_FIELD_LEN 10
1919
#define FTR_ALIAS_NAME_LEN 30
20-
#define FTR_ALIAS_OPTION_LEN 80
20+
#define FTR_ALIAS_OPTION_LEN 116
2121

2222
struct ftr_set_desc {
2323
char name[FTR_DESC_NAME_LEN];
@@ -71,6 +71,16 @@ static const struct ftr_set_desc isar1 __initconst = {
7171
},
7272
};
7373

74+
static const struct ftr_set_desc isar2 __initconst = {
75+
.name = "id_aa64isar2",
76+
.override = &id_aa64isar2_override,
77+
.fields = {
78+
{ "gpa3", ID_AA64ISAR2_GPA3_SHIFT },
79+
{ "apa3", ID_AA64ISAR2_APA3_SHIFT },
80+
{}
81+
},
82+
};
83+
7484
extern struct arm64_ftr_override kaslr_feature_override;
7585

7686
static const struct ftr_set_desc kaslr __initconst = {
@@ -88,6 +98,7 @@ static const struct ftr_set_desc * const regs[] __initconst = {
8898
&mmfr1,
8999
&pfr1,
90100
&isar1,
101+
&isar2,
91102
&kaslr,
92103
};
93104

@@ -100,7 +111,8 @@ static const struct {
100111
{ "arm64.nobti", "id_aa64pfr1.bt=0" },
101112
{ "arm64.nopauth",
102113
"id_aa64isar1.gpi=0 id_aa64isar1.gpa=0 "
103-
"id_aa64isar1.api=0 id_aa64isar1.apa=0" },
114+
"id_aa64isar1.api=0 id_aa64isar1.apa=0 "
115+
"id_aa64isar2.gpa3=0 id_aa64isar2.apa3=0" },
104116
{ "arm64.nomte", "id_aa64pfr1.mte=0" },
105117
{ "nokaslr", "kaslr.disabled=1" },
106118
};

arch/arm64/kvm/arm.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1870,6 +1870,7 @@ static int kvm_hyp_init_protection(u32 hyp_va_bits)
18701870
kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
18711871
kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
18721872
kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
1873+
kvm_nvhe_sym(id_aa64isar2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
18731874
kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
18741875
kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
18751876
kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);

arch/arm64/kvm/hyp/include/nvhe/fixed_config.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -192,6 +192,11 @@
192192
ARM64_FEATURE_MASK(ID_AA64ISAR1_I8MM) \
193193
)
194194

195+
#define PVM_ID_AA64ISAR2_ALLOW (\
196+
ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3) | \
197+
ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) \
198+
)
199+
195200
u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
196201
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
197202
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);

arch/arm64/kvm/hyp/nvhe/sys_regs.c

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@ u64 id_aa64pfr0_el1_sys_val;
2222
u64 id_aa64pfr1_el1_sys_val;
2323
u64 id_aa64isar0_el1_sys_val;
2424
u64 id_aa64isar1_el1_sys_val;
25+
u64 id_aa64isar2_el1_sys_val;
2526
u64 id_aa64mmfr0_el1_sys_val;
2627
u64 id_aa64mmfr1_el1_sys_val;
2728
u64 id_aa64mmfr2_el1_sys_val;
@@ -183,6 +184,17 @@ static u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu)
183184
return id_aa64isar1_el1_sys_val & allow_mask;
184185
}
185186

187+
static u64 get_pvm_id_aa64isar2(const struct kvm_vcpu *vcpu)
188+
{
189+
u64 allow_mask = PVM_ID_AA64ISAR2_ALLOW;
190+
191+
if (!vcpu_has_ptrauth(vcpu))
192+
allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) |
193+
ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3));
194+
195+
return id_aa64isar2_el1_sys_val & allow_mask;
196+
}
197+
186198
static u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu)
187199
{
188200
u64 set_mask;
@@ -225,6 +237,8 @@ u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
225237
return get_pvm_id_aa64isar0(vcpu);
226238
case SYS_ID_AA64ISAR1_EL1:
227239
return get_pvm_id_aa64isar1(vcpu);
240+
case SYS_ID_AA64ISAR2_EL1:
241+
return get_pvm_id_aa64isar2(vcpu);
228242
case SYS_ID_AA64MMFR0_EL1:
229243
return get_pvm_id_aa64mmfr0(vcpu);
230244
case SYS_ID_AA64MMFR1_EL1:

arch/arm64/kvm/sys_regs.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1097,6 +1097,11 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
10971097
ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) |
10981098
ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI));
10991099
break;
1100+
case SYS_ID_AA64ISAR2_EL1:
1101+
if (!vcpu_has_ptrauth(vcpu))
1102+
val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) |
1103+
ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3));
1104+
break;
11001105
case SYS_ID_AA64DFR0_EL1:
11011106
/* Limit debug to ARMv8.0 */
11021107
val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);

0 commit comments

Comments (0)