|
30 | 30 | #include <asm/fixmap.h> |
31 | 31 | #include <asm/paravirt.h> |
32 | 32 | #include <asm/asm-prototypes.h> |
| 33 | +#include <asm/cfi.h> |
33 | 34 |
|
34 | 35 | int __read_mostly alternatives_patched; |
35 | 36 |
|
@@ -832,15 +833,82 @@ void __init_or_module apply_seal_endbr(s32 *start, s32 *end) { } |
832 | 833 | #endif /* CONFIG_X86_KERNEL_IBT */ |
833 | 834 |
|
834 | 835 | #ifdef CONFIG_FINEIBT |
| 836 | +#define __CFI_DEFAULT CFI_DEFAULT |
| 837 | +#elif defined(CONFIG_CFI_CLANG) |
| 838 | +#define __CFI_DEFAULT CFI_KCFI |
| 839 | +#else |
| 840 | +#define __CFI_DEFAULT CFI_OFF |
| 841 | +#endif |
835 | 842 |
|
836 | | -enum cfi_mode { |
837 | | - CFI_DEFAULT, |
838 | | - CFI_OFF, |
839 | | - CFI_KCFI, |
840 | | - CFI_FINEIBT, |
841 | | -}; |
| 843 | +enum cfi_mode cfi_mode __ro_after_init = __CFI_DEFAULT; |
| 844 | + |
| 845 | +#ifdef CONFIG_CFI_CLANG |
| 846 | +struct bpf_insn; |
| 847 | + |
| 848 | +/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */ |
| 849 | +extern unsigned int __bpf_prog_runX(const void *ctx, |
| 850 | + const struct bpf_insn *insn); |
| 851 | + |
| 852 | +/* |
| 853 | + * Force a reference to the external symbol so the compiler generates |
| 854 | + * __kcfi_typeid. |
| 855 | + */ |
| 856 | +__ADDRESSABLE(__bpf_prog_runX); |
| 857 | + |
| 858 | +/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */ |
| 859 | +asm ( |
| 860 | +" .pushsection .data..ro_after_init,\"aw\",@progbits \n" |
| 861 | +" .type cfi_bpf_hash,@object \n" |
| 862 | +" .globl cfi_bpf_hash \n" |
| 863 | +" .p2align 2, 0x0 \n" |
| 864 | +"cfi_bpf_hash: \n" |
| 865 | +" .long __kcfi_typeid___bpf_prog_runX \n" |
| 866 | +" .size cfi_bpf_hash, 4 \n" |
| 867 | +" .popsection \n" |
| 868 | +); |
| 869 | + |
| 870 | +/* Must match bpf_callback_t */ |
| 871 | +extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64); |
| 872 | + |
| 873 | +__ADDRESSABLE(__bpf_callback_fn); |
| 874 | + |
| 875 | +/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */ |
| 876 | +asm ( |
| 877 | +" .pushsection .data..ro_after_init,\"aw\",@progbits \n" |
| 878 | +" .type cfi_bpf_subprog_hash,@object \n" |
| 879 | +" .globl cfi_bpf_subprog_hash \n" |
| 880 | +" .p2align 2, 0x0 \n" |
| 881 | +"cfi_bpf_subprog_hash: \n" |
| 882 | +" .long __kcfi_typeid___bpf_callback_fn \n" |
| 883 | +" .size cfi_bpf_subprog_hash, 4 \n" |
| 884 | +" .popsection \n" |
| 885 | +); |
| 886 | + |
| 887 | +u32 cfi_get_func_hash(void *func) |
| 888 | +{ |
| 889 | + u32 hash; |
| 890 | + |
| 891 | + func -= cfi_get_offset(); |
| 892 | + switch (cfi_mode) { |
| 893 | + case CFI_FINEIBT: |
| 894 | + func += 7; |
| 895 | + break; |
| 896 | + case CFI_KCFI: |
| 897 | + func += 1; |
| 898 | + break; |
| 899 | + default: |
| 900 | + return 0; |
| 901 | + } |
| 902 | + |
| 903 | + if (get_kernel_nofault(hash, func)) |
| 904 | + return 0; |
| 905 | + |
| 906 | + return hash; |
| 907 | +} |
| 908 | +#endif |
| 909 | + |
| 910 | +#ifdef CONFIG_FINEIBT |
842 | 911 |
|
843 | | -static enum cfi_mode cfi_mode __ro_after_init = CFI_DEFAULT; |
844 | 912 | static bool cfi_rand __ro_after_init = true; |
845 | 913 | static u32 cfi_seed __ro_after_init; |
846 | 914 |
|
@@ -1149,8 +1217,11 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, |
1149 | 1217 | goto err; |
1150 | 1218 |
|
1151 | 1219 | if (cfi_rand) { |
1152 | | - if (builtin) |
| 1220 | + if (builtin) { |
1153 | 1221 | cfi_seed = get_random_u32(); |
| 1222 | + cfi_bpf_hash = cfi_rehash(cfi_bpf_hash); |
| 1223 | + cfi_bpf_subprog_hash = cfi_rehash(cfi_bpf_subprog_hash); |
| 1224 | + } |
1154 | 1225 |
|
1155 | 1226 | ret = cfi_rand_preamble(start_cfi, end_cfi); |
1156 | 1227 | if (ret) |
|
0 commit comments