@@ -185,34 +185,15 @@ static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
185185}
186186
187187static void kaslr_adjust_got (unsigned long offset ) {}
188- static void rescue_relocs (void ) {}
189- static void free_relocs (void ) {}
190188#else
191- static int * vmlinux_relocs_64_start ;
192- static int * vmlinux_relocs_64_end ;
193-
194- static void rescue_relocs (void )
195- {
196- unsigned long size = __vmlinux_relocs_64_end - __vmlinux_relocs_64_start ;
197-
198- vmlinux_relocs_64_start = (void * )physmem_alloc_top_down (RR_RELOC , size , 0 );
199- vmlinux_relocs_64_end = (void * )vmlinux_relocs_64_start + size ;
200- memmove (vmlinux_relocs_64_start , __vmlinux_relocs_64_start , size );
201- }
202-
203- static void free_relocs (void )
204- {
205- physmem_free (RR_RELOC );
206- }
207-
208189static void kaslr_adjust_relocs (unsigned long min_addr , unsigned long max_addr ,
209190 unsigned long offset , unsigned long phys_offset )
210191{
211192 int * reloc ;
212193 long loc ;
213194
214195 /* Adjust R_390_64 relocations */
215- for (reloc = vmlinux_relocs_64_start ; reloc < vmlinux_relocs_64_end ; reloc ++ ) {
196+ for (reloc = ( int * ) __vmlinux_relocs_64_start ; reloc < ( int * ) __vmlinux_relocs_64_end ; reloc ++ ) {
216197 loc = (long )* reloc + phys_offset ;
217198 if (loc < min_addr || loc > max_addr )
218199 error ("64-bit relocation outside of kernel!\n" );
@@ -486,7 +467,6 @@ void startup_kernel(void)
486467 detect_physmem_online_ranges (max_physmem_end );
487468 save_ipl_cert_comp_list ();
488469 rescue_initrd (safe_addr , ident_map_size );
489- rescue_relocs ();
490470
491471 if (kaslr_enabled ())
492472 __kaslr_offset_phys = randomize_within_range (kernel_size , THREAD_SIZE , 0 , ident_map_size );
@@ -498,6 +478,18 @@ void startup_kernel(void)
498478
499479 /* vmlinux decompression is done, shrink reserved low memory */
500480 physmem_reserve (RR_DECOMPRESSOR , 0 , (unsigned long )_decompressor_end );
481+
482+ /*
483+ * In case KASLR is enabled the randomized location of .amode31
484+ * section might overlap with .vmlinux.relocs section. To avoid that
485+ * the below randomize_within_range() could have been called with
486+ * __vmlinux_relocs_64_end as the lower range address. However,
487+ * .amode31 section is written to by the decompressed kernel - at
488+ * that time the contents of .vmlinux.relocs is not needed anymore.
489+	 * Conversely, .vmlinux.relocs is read only by the decompressor, even
490+ * before the kernel started. Therefore, in case the two sections
491+ * overlap there is no risk of corrupting any data.
492+ */
501493 if (kaslr_enabled ())
502494 amode31_lma = randomize_within_range (vmlinux .amode31_size , PAGE_SIZE , 0 , SZ_2G );
503495 if (!amode31_lma )
@@ -521,7 +513,6 @@ void startup_kernel(void)
521513 kaslr_adjust_relocs (__kaslr_offset_phys , __kaslr_offset_phys + vmlinux .image_size ,
522514 __kaslr_offset , __kaslr_offset_phys );
523515 kaslr_adjust_got (__kaslr_offset );
524- free_relocs ();
525516 setup_vmem (__kaslr_offset , __kaslr_offset + kernel_size , asce_limit );
526517 copy_bootdata ();
527518
0 commit comments