|
25 | 25 | #define _EFER_SVME 12 /* Enable virtualization */ |
26 | 26 | #define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */ |
27 | 27 | #define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */ |
| 28 | +#define _EFER_AUTOIBRS 21 /* Enable Automatic IBRS */ |
28 | 29 |
|
29 | 30 | #define EFER_SCE (1<<_EFER_SCE) |
30 | 31 | #define EFER_LME (1<<_EFER_LME) |
|
33 | 34 | #define EFER_SVME (1<<_EFER_SVME) |
34 | 35 | #define EFER_LMSLE (1<<_EFER_LMSLE) |
35 | 36 | #define EFER_FFXSR (1<<_EFER_FFXSR) |
| 37 | +#define EFER_AUTOIBRS (1<<_EFER_AUTOIBRS) /* Automatic IBRS enable (mask form of bit 21 above) */ |
36 | 38 |
|
37 | 39 | /* Intel MSRs. Some also available on other CPUs */ |
38 | 40 |
|
|
49 | 51 | #define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */ |
50 | 52 | #define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT) |
51 | 53 |
|
| 54 | +/* Mask of the SPEC_CTRL bits which the kernel toggles when controlling mitigations */ |
| 55 | +#define SPEC_CTRL_MITIGATIONS_MASK (SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \ |
| 56 | + | SPEC_CTRL_RRSBA_DIS_S) |
| 57 | + |
52 | 58 | #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ |
53 | 59 | #define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */ |
54 | 60 |
|
|
189 | 195 | #define MSR_TURBO_RATIO_LIMIT1 0x000001ae |
190 | 196 | #define MSR_TURBO_RATIO_LIMIT2 0x000001af |
191 | 197 |
|
| 198 | +#define MSR_SNOOP_RSP_0 0x00001328 /* NOTE(review): snoop-response config MSR pair — confirm semantics in Intel SDM */ |
| 199 | +#define MSR_SNOOP_RSP_1 0x00001329 /* second MSR of the snoop-response pair */ |
| 200 | + |
192 | 201 | #define MSR_LBR_SELECT 0x000001c8 |
193 | 202 | #define MSR_LBR_TOS 0x000001c9 |
194 | 203 |
|
|
566 | 575 | #define MSR_AMD64_SEV_ES_ENABLED BIT_ULL(MSR_AMD64_SEV_ES_ENABLED_BIT) |
567 | 576 | #define MSR_AMD64_SEV_SNP_ENABLED BIT_ULL(MSR_AMD64_SEV_SNP_ENABLED_BIT) |
568 | 577 |
|
| 578 | +/* SNP feature bits enabled by the hypervisor (presumably in the same SEV status MSR as the *_ENABLED bits above — TODO confirm against the SEV-SNP ABI spec) */ |
| 579 | +#define MSR_AMD64_SNP_VTOM BIT_ULL(3) |
| 580 | +#define MSR_AMD64_SNP_REFLECT_VC BIT_ULL(4) |
| 581 | +#define MSR_AMD64_SNP_RESTRICTED_INJ BIT_ULL(5) |
| 582 | +#define MSR_AMD64_SNP_ALT_INJ BIT_ULL(6) |
| 583 | +#define MSR_AMD64_SNP_DEBUG_SWAP BIT_ULL(7) |
| 584 | +#define MSR_AMD64_SNP_PREVENT_HOST_IBS BIT_ULL(8) |
| 585 | +#define MSR_AMD64_SNP_BTB_ISOLATION BIT_ULL(9) |
| 586 | +#define MSR_AMD64_SNP_VMPL_SSS BIT_ULL(10) |
| 587 | +#define MSR_AMD64_SNP_SECURE_TSC BIT_ULL(11) |
| 588 | +#define MSR_AMD64_SNP_VMGEXIT_PARAM BIT_ULL(12) |
| 589 | +#define MSR_AMD64_SNP_IBS_VIRT BIT_ULL(14) |
| 590 | +#define MSR_AMD64_SNP_VMSA_REG_PROTECTION BIT_ULL(16) |
| 591 | +#define MSR_AMD64_SNP_SMT_PROTECTION BIT_ULL(17) |
| 592 | + |
| 593 | +/* SNP feature bits reserved for future use (bits 13, 15, and 18-63 skipped by the defines above). */ |
| 594 | +#define MSR_AMD64_SNP_RESERVED_BIT13 BIT_ULL(13) |
| 595 | +#define MSR_AMD64_SNP_RESERVED_BIT15 BIT_ULL(15) |
| 596 | +#define MSR_AMD64_SNP_RESERVED_MASK GENMASK_ULL(63, 18) |
| 597 | + |
569 | 598 | #define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f /* NOTE(review): virtualized SPEC_CTRL — confirm scope (SSBD) in the AMD APM */ |
570 | 599 |
|
571 | 600 | /* AMD Collaborative Processor Performance Control MSRs */ |
|
1061 | 1090 |
|
1062 | 1091 | /* - AMD: */ |
1063 | 1092 | #define MSR_IA32_MBA_BW_BASE 0xc0000200 |
| 1093 | +#define MSR_IA32_SMBA_BW_BASE 0xc0000280 /* presumably Slow Memory Bandwidth Allocation base, parallel to MBA_BW_BASE above — TODO confirm in AMD QoS docs */ |
| 1094 | +#define MSR_IA32_EVT_CFG_BASE 0xc0000400 /* NOTE(review): event configuration base MSR — confirm against AMD Bandwidth Monitoring Event Configuration spec */ |
1064 | 1095 |
|
1065 | 1096 | /* MSR_IA32_VMX_MISC bits */ |
1066 | 1097 | #define MSR_IA32_VMX_MISC_INTEL_PT (1ULL << 14) |
|
0 commit comments