|
20 | 20 | #include <linux/dma-map-ops.h> |
21 | 21 | #include <linux/crash_dump.h> |
22 | 22 | #include <linux/hugetlb.h> |
| 23 | +#ifdef CONFIG_RELOCATABLE |
| 24 | +#include <linux/elf.h> |
| 25 | +#endif |
23 | 26 |
|
24 | 27 | #include <asm/fixmap.h> |
25 | 28 | #include <asm/tlbflush.h> |
@@ -146,7 +149,7 @@ static void __init print_vm_layout(void) |
146 | 149 | print_ml("kasan", KASAN_SHADOW_START, KASAN_SHADOW_END); |
147 | 150 | #endif |
148 | 151 |
|
149 | | - print_ml("kernel", (unsigned long)KERNEL_LINK_ADDR, |
| 152 | + print_ml("kernel", (unsigned long)kernel_map.virt_addr, |
150 | 153 | (unsigned long)ADDRESS_SPACE_END); |
151 | 154 | } |
152 | 155 | } |
@@ -820,6 +823,44 @@ static __init void set_satp_mode(void) |
820 | 823 | #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing." |
821 | 824 | #endif |
822 | 825 |
|
#ifdef CONFIG_RELOCATABLE
/* Linker-provided bounds of the .rela.dyn section (see vmlinux.lds.S). */
extern unsigned long __rela_dyn_start, __rela_dyn_end;

/*
 * Apply the R_RISCV_RELATIVE dynamic relocations so the kernel can run at
 * a virtual address (kernel_map.virt_addr) different from the address it
 * was linked at (KERNEL_LINK_ADDR).
 *
 * NOTE(review): this runs from setup_vm(), i.e. before relocation/paging
 * is fully set up (see the #error above about absolute addressing), which
 * is why relocation targets are written through addresses derived from
 * kernel_map.phys_addr rather than their linked virtual addresses.
 */
static void __init relocate_kernel(void)
{
	Elf64_Rela *rela = (Elf64_Rela *)&__rela_dyn_start;
	/*
	 * This holds the offset between the linked virtual address and the
	 * relocated virtual address.
	 */
	uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
	/*
	 * This holds the offset between kernel linked virtual address and
	 * physical address.
	 */
	uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;

	for ( ; rela < (Elf64_Rela *)&__rela_dyn_end; rela++) {
		/*
		 * r_offset is the linked virtual address of the relocation
		 * target; subtract the VA->PA offset to get an address that
		 * can be written through at this early stage.
		 */
		Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
		Elf64_Addr relocated_addr = rela->r_addend;

		/* Only R_RISCV_RELATIVE entries are handled; skip the rest. */
		if (rela->r_info != R_RISCV_RELATIVE)
			continue;

		/*
		 * Make sure to not relocate vdso symbols like rt_sigreturn
		 * which are linked from the address 0 in vmlinux since
		 * vdso symbol addresses are actually used as an offset from
		 * mm->context.vdso in VDSO_OFFSET macro.
		 */
		if (relocated_addr >= KERNEL_LINK_ADDR)
			relocated_addr += reloc_offset;

		*(Elf64_Addr *)addr = relocated_addr;
	}
}
#endif /* CONFIG_RELOCATABLE */
| 863 | + |
823 | 864 | #ifdef CONFIG_XIP_KERNEL |
824 | 865 | static void __init create_kernel_page_table(pgd_t *pgdir, |
825 | 866 | __always_unused bool early) |
@@ -1007,6 +1048,17 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) |
1007 | 1048 | BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K); |
1008 | 1049 | #endif |
1009 | 1050 |
|
| 1051 | +#ifdef CONFIG_RELOCATABLE |
| 1052 | + /* |
| 1053 | +	 * The early page table uses only one PUD, so the kernel must fit |
| 1054 | +	 * entirely within a single PUD_SIZE-aligned region: if the |
| 1055 | +	 * relocation offset makes the kernel cross a PUD_SIZE boundary, |
| 1056 | +	 * raise a bug since a part of the kernel would not get mapped. |
| 1057 | + */ |
| 1058 | + BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size); |
| 1059 | + relocate_kernel(); |
| 1060 | +#endif |
| 1061 | + |
1010 | 1062 | apply_early_boot_alternatives(); |
1011 | 1063 | pt_ops_set_early(); |
1012 | 1064 |
|
|
0 commit comments