 #include <linux/dma-map-ops.h>
 #include <linux/crash_dump.h>
 #include <linux/hugetlb.h>
-#ifdef CONFIG_RELOCATABLE
-#include <linux/elf.h>
-#endif
 #include <linux/kfence.h>
 #include <linux/execmem.h>

 #include <asm/fixmap.h>
 #include <asm/io.h>
 #include <asm/kasan.h>
+#include <asm/module.h>
 #include <asm/numa.h>
 #include <asm/pgtable.h>
 #include <asm/sections.h>
@@ -323,6 +321,44 @@ static void __init setup_bootmem(void)
 	hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
 }

+#ifdef CONFIG_RELOCATABLE
+extern unsigned long __rela_dyn_start, __rela_dyn_end;
+
+static void __init relocate_kernel(void)
+{
+	Elf_Rela *rela = (Elf_Rela *)&__rela_dyn_start;
+	/*
+	 * This holds the offset between the linked virtual address and the
+	 * relocated virtual address.
+	 */
+	uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
+	/*
+	 * This holds the offset between kernel linked virtual address and
+	 * physical address.
+	 */
+	uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;
+
+	for ( ; rela < (Elf_Rela *)&__rela_dyn_end; rela++) {
+		Elf_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
+		Elf_Addr relocated_addr = rela->r_addend;
+
+		if (rela->r_info != R_RISCV_RELATIVE)
+			continue;
+
+		/*
+		 * Make sure to not relocate vdso symbols like rt_sigreturn
+		 * which are linked from the address 0 in vmlinux since
+		 * vdso symbol addresses are actually used as an offset from
+		 * mm->context.vdso in VDSO_OFFSET macro.
+		 */
+		if (relocated_addr >= KERNEL_LINK_ADDR)
+			relocated_addr += reloc_offset;
+
+		*(Elf_Addr *)addr = relocated_addr;
+	}
+}
+#endif /* CONFIG_RELOCATABLE */
+
 #ifdef CONFIG_MMU
 struct pt_alloc_ops pt_ops __meminitdata;

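For readers unfamiliar with RELA processing, here is a minimal, self-contained sketch (not part of the commit) of what one iteration of the relocate_kernel() loop above does for an R_RISCV_RELATIVE entry: both the slot named by r_offset and the value named by r_addend are linked virtual addresses, so each gets shifted by the load offset. The names image, link_addr, and rela_sketch are made up for illustration; the in-kernel loop differs in that it subtracts va_kernel_link_pa_offset to reach the slot's physical address, since the MMU is still off.

#include <stdint.h>
#include <stdio.h>

#define R_RISCV_RELATIVE 3

/* Hypothetical stand-in for Elf64_Rela from <linux/elf.h>. */
typedef struct {
	uint64_t r_offset;
	uint64_t r_info;
	int64_t  r_addend;
} rela_sketch;

int main(void)
{
	uint64_t image[4] = { 0 };                       /* stands in for the kernel image */
	uint64_t link_addr = 0xffffffff80000000ULL;      /* hypothetical KERNEL_LINK_ADDR */
	uint64_t load_addr = (uint64_t)(uintptr_t)image; /* where the image actually sits */
	uint64_t reloc_offset = load_addr - link_addr;

	/* One RELATIVE entry: the word linked at link_addr + 8 should hold link_addr + 16. */
	rela_sketch rela = { link_addr + 8, R_RISCV_RELATIVE, (int64_t)(link_addr + 16) };

	/* RELATIVE entries carry no symbol, so r_info is just the relocation type. */
	if (rela.r_info == R_RISCV_RELATIVE) {
		/* Translate the slot's linked VA to where it really is... */
		uint64_t *slot = (uint64_t *)(uintptr_t)(rela.r_offset + reloc_offset);
		/* ...and store the addend shifted by the same load offset. */
		*slot = (uint64_t)rela.r_addend + reloc_offset;
	}
	printf("image[1] = 0x%llx (load_addr + 16)\n", (unsigned long long)image[1]);
	return 0;
}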
@@ -823,6 +859,8 @@ static __init void set_satp_mode(uintptr_t dtb_pa)
 	uintptr_t set_satp_mode_pmd = ((unsigned long)set_satp_mode) & PMD_MASK;
 	u64 satp_mode_cmdline = __pi_set_satp_mode_from_cmdline(dtb_pa);

+	kernel_map.page_offset = PAGE_OFFSET_L5;
+
 	if (satp_mode_cmdline == SATP_MODE_57) {
 		disable_pgtable_l5();
 	} else if (satp_mode_cmdline == SATP_MODE_48) {
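The line added above gives set_satp_mode() an optimistic sv57 default, which the existing disable_pgtable_l5()/disable_pgtable_l4() fallbacks can then lower when the hardware or command line rejects a level. A rough standalone sketch of that pattern, assuming the disable helpers also rewrite page_offset (the constants here are placeholders, not the real PAGE_OFFSET_L* values):

#include <stdint.h>
#include <stdio.h>

/* Placeholder direct-map bases; the real PAGE_OFFSET_L* constants differ. */
#define PAGE_OFFSET_L5 0xff60000000000000ULL
#define PAGE_OFFSET_L4 0xffffaf8000000000ULL
#define PAGE_OFFSET_L3 0xffffffd800000000ULL

static uint64_t page_offset;

static void disable_l5(void) { page_offset = PAGE_OFFSET_L4; }
static void disable_l4(void) { page_offset = PAGE_OFFSET_L3; }

int main(void)
{
	int hw_max_levels = 4; /* pretend the hardware rejects sv57 */

	page_offset = PAGE_OFFSET_L5; /* optimistic default, as in the hunk above */
	if (hw_max_levels < 5)
		disable_l5();
	if (hw_max_levels < 4)
		disable_l4();
	printf("page_offset = 0x%llx\n", (unsigned long long)page_offset);
	return 0;
}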
@@ -893,44 +931,6 @@ static __init void set_satp_mode(uintptr_t dtb_pa)
 #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 #endif

-#ifdef CONFIG_RELOCATABLE
-extern unsigned long __rela_dyn_start, __rela_dyn_end;
-
-static void __init relocate_kernel(void)
-{
-	Elf64_Rela *rela = (Elf64_Rela *)&__rela_dyn_start;
-	/*
-	 * This holds the offset between the linked virtual address and the
-	 * relocated virtual address.
-	 */
-	uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
-	/*
-	 * This holds the offset between kernel linked virtual address and
-	 * physical address.
-	 */
-	uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;
-
-	for ( ; rela < (Elf64_Rela *)&__rela_dyn_end; rela++) {
-		Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
-		Elf64_Addr relocated_addr = rela->r_addend;
-
-		if (rela->r_info != R_RISCV_RELATIVE)
-			continue;
-
-		/*
-		 * Make sure to not relocate vdso symbols like rt_sigreturn
-		 * which are linked from the address 0 in vmlinux since
-		 * vdso symbol addresses are actually used as an offset from
-		 * mm->context.vdso in VDSO_OFFSET macro.
-		 */
-		if (relocated_addr >= KERNEL_LINK_ADDR)
-			relocated_addr += reloc_offset;
-
-		*(Elf64_Addr *)addr = relocated_addr;
-	}
-}
-#endif /* CONFIG_RELOCATABLE */
-
 #ifdef CONFIG_XIP_KERNEL
 static void __init create_kernel_page_table(pgd_t *pgdir,
 					    __always_unused bool early)
@@ -1108,11 +1108,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;

 #ifdef CONFIG_XIP_KERNEL
-#ifdef CONFIG_64BIT
-	kernel_map.page_offset = PAGE_OFFSET_L3;
-#else
-	kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
-#endif
 	kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
 	kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);

@@ -1127,7 +1122,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	kernel_map.va_kernel_xip_data_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr
 						  + (uintptr_t)&_sdata - (uintptr_t)&_start;
 #else
-	kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
 	kernel_map.phys_addr = (uintptr_t)(&_start);
 	kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
 	kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
@@ -1174,7 +1168,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	 * makes the kernel cross over a PUD_SIZE boundary, raise a bug
 	 * since a part of the kernel would not get mapped.
 	 */
-	BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size);
+	if (IS_ENABLED(CONFIG_64BIT))
+		BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size);
 	relocate_kernel();
 #endif

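To see the arithmetic behind the check above (the surrounding comment explains that a kernel crossing a PUD_SIZE boundary would be partially unmapped; it is now gated on CONFIG_64BIT, presumably because the folded 32-bit page table has no real PUD level), here is a worked example with illustrative numbers:

#include <stdint.h>
#include <stdio.h>

#define PUD_SHIFT 30 /* 1 GiB PUDs, as on the 64-bit sv* modes */
#define PUD_SIZE  (1ULL << PUD_SHIFT)

int main(void)
{
	/* Illustrative: a randomized base sitting 16 MiB below a PUD boundary. */
	uint64_t virt_addr = 0xffffffff80000000ULL + (PUD_SIZE - (16ULL << 20));
	uint64_t size = 32ULL << 20; /* illustrative 32 MiB kernel image */
	uint64_t room = PUD_SIZE - (virt_addr & (PUD_SIZE - 1));

	printf("room to next PUD boundary: %llu MiB\n",
	       (unsigned long long)(room >> 20));
	if (room < size)
		printf("BUG_ON would fire: image crosses a PUD boundary\n");
	return 0;
}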
@@ -1378,6 +1373,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 {
 	dtb_early_va = (void *)dtb_pa;
 	dtb_early_pa = dtb_pa;
+
+#ifdef CONFIG_RELOCATABLE
+	kernel_map.virt_addr = (uintptr_t)_start;
+	kernel_map.phys_addr = (uintptr_t)_start;
+	relocate_kernel();
+#endif
 }

 static inline void setup_vm_final(void)
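In this !MMU variant of setup_vm(), setting virt_addr and phys_addr both to _start makes the two offsets inside relocate_kernel() collapse into plain load-offset arithmetic. A small sketch of that algebra with hypothetical addresses; note the unsigned subtraction in va_link_pa_offset wraps, but the wrap cancels when it is subtracted back out:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t link_addr = 0x80000000ULL;  /* hypothetical KERNEL_LINK_ADDR */
	uint64_t start_addr = 0x80200000ULL; /* hypothetical &_start, the load address */

	/* With virt_addr == phys_addr == start_addr, as set in the hunk above: */
	uint64_t reloc_offset = start_addr - link_addr;      /* shift applied to values */
	uint64_t va_link_pa_offset = link_addr - start_addr; /* wraps; cancels below */

	uint64_t r_offset = link_addr + 0x1000;              /* linked VA of a slot */
	uint64_t slot = r_offset - va_link_pa_offset;        /* actual slot address */

	printf("slot at 0x%llx, values shifted by 0x%llx\n",
	       (unsigned long long)slot, (unsigned long long)reloc_offset);
	return 0;
}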