@@ -110,38 +110,19 @@ static struct linux_binfmt elf_format = {
110110
111111#define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))
112112
113- static int set_brk (unsigned long start , unsigned long end , int prot )
114- {
115- start = ELF_PAGEALIGN (start );
116- end = ELF_PAGEALIGN (end );
117- if (end > start ) {
118- /*
119- * Map the last of the bss segment.
120- * If the header is requesting these pages to be
121- * executable, honour that (ppc32 needs this).
122- */
123- int error = vm_brk_flags (start , end - start ,
124- prot & PROT_EXEC ? VM_EXEC : 0 );
125- if (error )
126- return error ;
127- }
128- current -> mm -> start_brk = current -> mm -> brk = end ;
129- return 0 ;
130- }
131-
132- /* We need to explicitly zero any fractional pages
133- after the data section (i.e. bss). This would
134- contain the junk from the file that should not
135- be in memory
113+ /*
114+ * We need to explicitly zero any trailing portion of the page that follows
115+ * p_filesz when it ends before the page ends (e.g. bss), otherwise this
116+ * memory will contain the junk from the file that should not be present.
136117 */
137- static int padzero (unsigned long elf_bss )
118+ static int padzero (unsigned long address )
138119{
139120 unsigned long nbyte ;
140121
141- nbyte = ELF_PAGEOFFSET (elf_bss );
122+ nbyte = ELF_PAGEOFFSET (address );
142123 if (nbyte ) {
143124 nbyte = ELF_MIN_ALIGN - nbyte ;
144- if (clear_user ((void __user * ) elf_bss , nbyte ))
125+ if (clear_user ((void __user * )address , nbyte ))
145126 return - EFAULT ;
146127 }
147128 return 0 ;
@@ -367,6 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
367348 return 0 ;
368349}
369350
351+ /*
352+ * Map "eppnt->p_filesz" bytes from "filep" offset "eppnt->p_offset"
353+ * into memory at "addr". (Note that p_filesz is rounded up to the
354+ * next page, so any extra bytes from the file must be wiped.)
355+ */
370356static unsigned long elf_map (struct file * filep , unsigned long addr ,
371357 const struct elf_phdr * eppnt , int prot , int type ,
372358 unsigned long total_size )
@@ -406,6 +392,60 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
406392 return (map_addr );
407393}
408394
395+ /*
396+ * Map "eppnt->p_filesz" bytes from "filep" offset "eppnt->p_offset"
397+ * into memory at "addr". Memory from "p_filesz" through "p_memsz"
398+ * rounded up to the next page is zeroed.
399+ */
400+ static unsigned long elf_load (struct file * filep , unsigned long addr ,
401+ const struct elf_phdr * eppnt , int prot , int type ,
402+ unsigned long total_size )
403+ {
404+ unsigned long zero_start , zero_end ;
405+ unsigned long map_addr ;
406+
407+ if (eppnt -> p_filesz ) {
408+ map_addr = elf_map (filep , addr , eppnt , prot , type , total_size );
409+ if (BAD_ADDR (map_addr ))
410+ return map_addr ;
411+ if (eppnt -> p_memsz > eppnt -> p_filesz ) {
412+ zero_start = map_addr + ELF_PAGEOFFSET (eppnt -> p_vaddr ) +
413+ eppnt -> p_filesz ;
414+ zero_end = map_addr + ELF_PAGEOFFSET (eppnt -> p_vaddr ) +
415+ eppnt -> p_memsz ;
416+
417+ /*
418+ * Zero the end of the last mapped page but ignore
419+ * any errors if the segment isn't writable.
420+ */
421+ if (padzero (zero_start ) && (prot & PROT_WRITE ))
422+ return - EFAULT ;
423+ }
424+ } else {
425+ map_addr = zero_start = ELF_PAGESTART (addr );
426+ zero_end = zero_start + ELF_PAGEOFFSET (eppnt -> p_vaddr ) +
427+ eppnt -> p_memsz ;
428+ }
429+ if (eppnt -> p_memsz > eppnt -> p_filesz ) {
430+ /*
431+ * Map the last of the segment.
432+ * If the header is requesting these pages to be
433+ * executable, honour that (ppc32 needs this).
434+ */
435+ int error ;
436+
437+ zero_start = ELF_PAGEALIGN (zero_start );
438+ zero_end = ELF_PAGEALIGN (zero_end );
439+
440+ error = vm_brk_flags (zero_start , zero_end - zero_start ,
441+ prot & PROT_EXEC ? VM_EXEC : 0 );
442+ if (error )
443+ map_addr = error ;
444+ }
445+ return map_addr ;
446+ }
447+
448+
409449static unsigned long total_mapping_size (const struct elf_phdr * phdr , int nr )
410450{
411451 elf_addr_t min_addr = -1 ;
@@ -596,8 +636,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
596636 struct elf_phdr * eppnt ;
597637 unsigned long load_addr = 0 ;
598638 int load_addr_set = 0 ;
599- unsigned long last_bss = 0 , elf_bss = 0 ;
600- int bss_prot = 0 ;
601639 unsigned long error = ~0UL ;
602640 unsigned long total_size ;
603641 int i ;
@@ -634,7 +672,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
634672 else if (no_base && interp_elf_ex -> e_type == ET_DYN )
635673 load_addr = - vaddr ;
636674
637- map_addr = elf_map (interpreter , load_addr + vaddr ,
675+ map_addr = elf_load (interpreter , load_addr + vaddr ,
638676 eppnt , elf_prot , elf_type , total_size );
639677 total_size = 0 ;
640678 error = map_addr ;
@@ -660,51 +698,9 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
660698 error = - ENOMEM ;
661699 goto out ;
662700 }
663-
664- /*
665- * Find the end of the file mapping for this phdr, and
666- * keep track of the largest address we see for this.
667- */
668- k = load_addr + eppnt -> p_vaddr + eppnt -> p_filesz ;
669- if (k > elf_bss )
670- elf_bss = k ;
671-
672- /*
673- * Do the same thing for the memory mapping - between
674- * elf_bss and last_bss is the bss section.
675- */
676- k = load_addr + eppnt -> p_vaddr + eppnt -> p_memsz ;
677- if (k > last_bss ) {
678- last_bss = k ;
679- bss_prot = elf_prot ;
680- }
681701 }
682702 }
683703
684- /*
685- * Now fill out the bss section: first pad the last page from
686- * the file up to the page boundary, and zero it from elf_bss
687- * up to the end of the page.
688- */
689- if (padzero (elf_bss )) {
690- error = - EFAULT ;
691- goto out ;
692- }
693- /*
694- * Next, align both the file and mem bss up to the page size,
695- * since this is where elf_bss was just zeroed up to, and where
696- * last_bss will end after the vm_brk_flags() below.
697- */
698- elf_bss = ELF_PAGEALIGN (elf_bss );
699- last_bss = ELF_PAGEALIGN (last_bss );
700- /* Finally, if there is still more bss to allocate, do it. */
701- if (last_bss > elf_bss ) {
702- error = vm_brk_flags (elf_bss , last_bss - elf_bss ,
703- bss_prot & PROT_EXEC ? VM_EXEC : 0 );
704- if (error )
705- goto out ;
706- }
707-
708704 error = load_addr ;
709705out :
710706 return error ;
@@ -828,8 +824,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
828824 unsigned long error ;
829825 struct elf_phdr * elf_ppnt , * elf_phdata , * interp_elf_phdata = NULL ;
830826 struct elf_phdr * elf_property_phdata = NULL ;
831- unsigned long elf_bss , elf_brk ;
832- int bss_prot = 0 ;
827+ unsigned long elf_brk ;
833828 int retval , i ;
834829 unsigned long elf_entry ;
835830 unsigned long e_entry ;
@@ -1020,7 +1015,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
10201015 if (retval < 0 )
10211016 goto out_free_dentry ;
10221017
1023- elf_bss = 0 ;
10241018 elf_brk = 0 ;
10251019
10261020 start_code = ~0UL ;
@@ -1040,33 +1034,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
10401034 if (elf_ppnt -> p_type != PT_LOAD )
10411035 continue ;
10421036
1043- if (unlikely (elf_brk > elf_bss )) {
1044- unsigned long nbyte ;
1045-
1046- /* There was a PT_LOAD segment with p_memsz > p_filesz
1047- before this one. Map anonymous pages, if needed,
1048- and clear the area. */
1049- retval = set_brk (elf_bss + load_bias ,
1050- elf_brk + load_bias ,
1051- bss_prot );
1052- if (retval )
1053- goto out_free_dentry ;
1054- nbyte = ELF_PAGEOFFSET (elf_bss );
1055- if (nbyte ) {
1056- nbyte = ELF_MIN_ALIGN - nbyte ;
1057- if (nbyte > elf_brk - elf_bss )
1058- nbyte = elf_brk - elf_bss ;
1059- if (clear_user ((void __user * )elf_bss +
1060- load_bias , nbyte )) {
1061- /*
1062- * This bss-zeroing can fail if the ELF
1063- * file specifies odd protections. So
1064- * we don't check the return value
1065- */
1066- }
1067- }
1068- }
1069-
10701037 elf_prot = make_prot (elf_ppnt -> p_flags , & arch_state ,
10711038 !!interpreter , false);
10721039
@@ -1162,7 +1129,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
11621129 }
11631130 }
11641131
1165- error = elf_map (bprm -> file , load_bias + vaddr , elf_ppnt ,
1132+ error = elf_load (bprm -> file , load_bias + vaddr , elf_ppnt ,
11661133 elf_prot , elf_flags , total_size );
11671134 if (BAD_ADDR (error )) {
11681135 retval = IS_ERR_VALUE (error ) ?
@@ -1210,40 +1177,24 @@ static int load_elf_binary(struct linux_binprm *bprm)
12101177
12111178 k = elf_ppnt -> p_vaddr + elf_ppnt -> p_filesz ;
12121179
1213- if (k > elf_bss )
1214- elf_bss = k ;
12151180 if ((elf_ppnt -> p_flags & PF_X ) && end_code < k )
12161181 end_code = k ;
12171182 if (end_data < k )
12181183 end_data = k ;
12191184 k = elf_ppnt -> p_vaddr + elf_ppnt -> p_memsz ;
1220- if (k > elf_brk ) {
1221- bss_prot = elf_prot ;
1185+ if (k > elf_brk )
12221186 elf_brk = k ;
1223- }
12241187 }
12251188
12261189 e_entry = elf_ex -> e_entry + load_bias ;
12271190 phdr_addr += load_bias ;
1228- elf_bss += load_bias ;
12291191 elf_brk += load_bias ;
12301192 start_code += load_bias ;
12311193 end_code += load_bias ;
12321194 start_data += load_bias ;
12331195 end_data += load_bias ;
12341196
1235- /* Calling set_brk effectively mmaps the pages that we need
1236- * for the bss and break sections. We must do this before
1237- * mapping in the interpreter, to make sure it doesn't wind
1238- * up getting placed where the bss needs to go.
1239- */
1240- retval = set_brk (elf_bss , elf_brk , bss_prot );
1241- if (retval )
1242- goto out_free_dentry ;
1243- if (likely (elf_bss != elf_brk ) && unlikely (padzero (elf_bss ))) {
1244- retval = - EFAULT ; /* Nobody gets to see this, but.. */
1245- goto out_free_dentry ;
1246- }
1197+ current -> mm -> start_brk = current -> mm -> brk = ELF_PAGEALIGN (elf_brk );
12471198
12481199 if (interpreter ) {
12491200 elf_entry = load_elf_interp (interp_elf_ex ,
@@ -1369,7 +1320,6 @@ static int load_elf_library(struct file *file)
13691320{
13701321 struct elf_phdr * elf_phdata ;
13711322 struct elf_phdr * eppnt ;
1372- unsigned long elf_bss , bss , len ;
13731323 int retval , error , i , j ;
13741324 struct elfhdr elf_ex ;
13751325
@@ -1414,30 +1364,15 @@ static int load_elf_library(struct file *file)
14141364 eppnt ++ ;
14151365
14161366 /* Now use mmap to map the library into memory. */
1417- error = vm_mmap (file ,
1418- ELF_PAGESTART (eppnt -> p_vaddr ),
1419- (eppnt -> p_filesz +
1420- ELF_PAGEOFFSET (eppnt -> p_vaddr )),
1367+ error = elf_load (file , ELF_PAGESTART (eppnt -> p_vaddr ),
1368+ eppnt ,
14211369 PROT_READ | PROT_WRITE | PROT_EXEC ,
14221370 MAP_FIXED_NOREPLACE | MAP_PRIVATE ,
1423- ( eppnt -> p_offset -
1424- ELF_PAGEOFFSET ( eppnt -> p_vaddr )));
1371+ 0 );
1372+
14251373 if (error != ELF_PAGESTART (eppnt -> p_vaddr ))
14261374 goto out_free_ph ;
14271375
1428- elf_bss = eppnt -> p_vaddr + eppnt -> p_filesz ;
1429- if (padzero (elf_bss )) {
1430- error = - EFAULT ;
1431- goto out_free_ph ;
1432- }
1433-
1434- len = ELF_PAGEALIGN (eppnt -> p_filesz + eppnt -> p_vaddr );
1435- bss = ELF_PAGEALIGN (eppnt -> p_memsz + eppnt -> p_vaddr );
1436- if (bss > len ) {
1437- error = vm_brk (len , bss - len );
1438- if (error )
1439- goto out_free_ph ;
1440- }
14411376 error = 0 ;
14421377
14431378out_free_ph :
0 commit comments