Skip to content

Commit 567b351

Browse files
John David Anglin authored and Helge Deller committed
parisc: Cleanup mmap implementation regarding color alignment
This change simplifies the randomization of file mapping regions. It reworks the code to remove duplication. The flow is now similar to that for mips. Finally, we consistently use the do_color_align variable to determine when color alignment is needed. Tested on rp3440. Signed-off-by: John David Anglin <dave.anglin@bell.net> Signed-off-by: Helge Deller <deller@gmx.de>
1 parent 653f3ea commit 567b351

1 file changed

Lines changed: 63 additions & 103 deletions

File tree

arch/parisc/kernel/sys_parisc.c

Lines changed: 63 additions & 103 deletions
Original file line numberDiff line numberDiff line change
@@ -25,31 +25,26 @@
2525
#include <linux/random.h>
2626
#include <linux/compat.h>
2727

28-
/* we construct an artificial offset for the mapping based on the physical
29-
* address of the kernel mapping variable */
30-
#define GET_LAST_MMAP(filp) \
31-
(filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
32-
#define SET_LAST_MMAP(filp, val) \
33-
{ /* nothing */ }
34-
35-
static int get_offset(unsigned int last_mmap)
36-
{
37-
return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
38-
}
28+
/*
29+
* Construct an artificial page offset for the mapping based on the physical
30+
* address of the kernel file mapping variable.
31+
*/
32+
#define GET_FILP_PGOFF(filp) \
33+
(filp ? (((unsigned long) filp->f_mapping) >> 8) \
34+
& ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL)
3935

40-
static unsigned long shared_align_offset(unsigned int last_mmap,
36+
static unsigned long shared_align_offset(unsigned long filp_pgoff,
4137
unsigned long pgoff)
4238
{
43-
return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
39+
return (filp_pgoff + pgoff) << PAGE_SHIFT;
4440
}
4541

4642
static inline unsigned long COLOR_ALIGN(unsigned long addr,
47-
unsigned int last_mmap, unsigned long pgoff)
43+
unsigned long filp_pgoff, unsigned long pgoff)
4844
{
4945
unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
5046
unsigned long off = (SHM_COLOUR-1) &
51-
(shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);
52-
47+
shared_align_offset(filp_pgoff, pgoff);
5348
return base + off;
5449
}
5550

@@ -98,126 +93,91 @@ static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
9893
return PAGE_ALIGN(STACK_TOP - stack_base);
9994
}
10095

96+
enum mmap_allocation_direction {UP, DOWN};
10197

102-
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
103-
unsigned long len, unsigned long pgoff, unsigned long flags)
98+
static unsigned long arch_get_unmapped_area_common(struct file *filp,
99+
unsigned long addr, unsigned long len, unsigned long pgoff,
100+
unsigned long flags, enum mmap_allocation_direction dir)
104101
{
105102
struct mm_struct *mm = current->mm;
106103
struct vm_area_struct *vma, *prev;
107-
unsigned long task_size = TASK_SIZE;
108-
int do_color_align, last_mmap;
104+
unsigned long filp_pgoff;
105+
int do_color_align;
109106
struct vm_unmapped_area_info info;
110107

111-
if (len > task_size)
108+
if (unlikely(len > TASK_SIZE))
112109
return -ENOMEM;
113110

114111
do_color_align = 0;
115112
if (filp || (flags & MAP_SHARED))
116113
do_color_align = 1;
117-
last_mmap = GET_LAST_MMAP(filp);
114+
filp_pgoff = GET_FILP_PGOFF(filp);
118115

119116
if (flags & MAP_FIXED) {
120-
if ((flags & MAP_SHARED) && last_mmap &&
121-
(addr - shared_align_offset(last_mmap, pgoff))
117+
/* Even MAP_FIXED mappings must reside within TASK_SIZE */
118+
if (TASK_SIZE - len < addr)
119+
return -EINVAL;
120+
121+
if ((flags & MAP_SHARED) && filp &&
122+
(addr - shared_align_offset(filp_pgoff, pgoff))
122123
& (SHM_COLOUR - 1))
123124
return -EINVAL;
124-
goto found_addr;
125+
return addr;
125126
}
126127

127128
if (addr) {
128-
if (do_color_align && last_mmap)
129-
addr = COLOR_ALIGN(addr, last_mmap, pgoff);
129+
if (do_color_align)
130+
addr = COLOR_ALIGN(addr, filp_pgoff, pgoff);
130131
else
131132
addr = PAGE_ALIGN(addr);
132133

133134
vma = find_vma_prev(mm, addr, &prev);
134-
if (task_size - len >= addr &&
135+
if (TASK_SIZE - len >= addr &&
135136
(!vma || addr + len <= vm_start_gap(vma)) &&
136137
(!prev || addr >= vm_end_gap(prev)))
137-
goto found_addr;
138+
return addr;
138139
}
139140

140-
info.flags = 0;
141141
info.length = len;
142+
info.align_mask = do_color_align ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
143+
info.align_offset = shared_align_offset(filp_pgoff, pgoff);
144+
145+
if (dir == DOWN) {
146+
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
147+
info.low_limit = PAGE_SIZE;
148+
info.high_limit = mm->mmap_base;
149+
addr = vm_unmapped_area(&info);
150+
if (!(addr & ~PAGE_MASK))
151+
return addr;
152+
VM_BUG_ON(addr != -ENOMEM);
153+
154+
/*
155+
* A failed mmap() very likely causes application failure,
156+
* so fall back to the bottom-up function here. This scenario
157+
* can happen with large stack limits and large mmap()
158+
* allocations.
159+
*/
160+
}
161+
162+
info.flags = 0;
142163
info.low_limit = mm->mmap_legacy_base;
143164
info.high_limit = mmap_upper_limit(NULL);
144-
info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
145-
info.align_offset = shared_align_offset(last_mmap, pgoff);
146-
addr = vm_unmapped_area(&info);
147-
148-
found_addr:
149-
if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
150-
SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
151-
152-
return addr;
165+
return vm_unmapped_area(&info);
153166
}
154167

155-
unsigned long
156-
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
157-
const unsigned long len, const unsigned long pgoff,
158-
const unsigned long flags)
168+
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
169+
unsigned long len, unsigned long pgoff, unsigned long flags)
159170
{
160-
struct vm_area_struct *vma, *prev;
161-
struct mm_struct *mm = current->mm;
162-
unsigned long addr = addr0;
163-
int do_color_align, last_mmap;
164-
struct vm_unmapped_area_info info;
165-
166-
/* requested length too big for entire address space */
167-
if (len > TASK_SIZE)
168-
return -ENOMEM;
169-
170-
do_color_align = 0;
171-
if (filp || (flags & MAP_SHARED))
172-
do_color_align = 1;
173-
last_mmap = GET_LAST_MMAP(filp);
174-
175-
if (flags & MAP_FIXED) {
176-
if ((flags & MAP_SHARED) && last_mmap &&
177-
(addr - shared_align_offset(last_mmap, pgoff))
178-
& (SHM_COLOUR - 1))
179-
return -EINVAL;
180-
goto found_addr;
181-
}
182-
183-
/* requesting a specific address */
184-
if (addr) {
185-
if (do_color_align && last_mmap)
186-
addr = COLOR_ALIGN(addr, last_mmap, pgoff);
187-
else
188-
addr = PAGE_ALIGN(addr);
189-
190-
vma = find_vma_prev(mm, addr, &prev);
191-
if (TASK_SIZE - len >= addr &&
192-
(!vma || addr + len <= vm_start_gap(vma)) &&
193-
(!prev || addr >= vm_end_gap(prev)))
194-
goto found_addr;
195-
}
196-
197-
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
198-
info.length = len;
199-
info.low_limit = PAGE_SIZE;
200-
info.high_limit = mm->mmap_base;
201-
info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
202-
info.align_offset = shared_align_offset(last_mmap, pgoff);
203-
addr = vm_unmapped_area(&info);
204-
if (!(addr & ~PAGE_MASK))
205-
goto found_addr;
206-
VM_BUG_ON(addr != -ENOMEM);
207-
208-
/*
209-
* A failed mmap() very likely causes application failure,
210-
* so fall back to the bottom-up function here. This scenario
211-
* can happen with large stack limits and large mmap()
212-
* allocations.
213-
*/
214-
return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
215-
216-
found_addr:
217-
if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
218-
SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
171+
return arch_get_unmapped_area_common(filp,
172+
addr, len, pgoff, flags, UP);
173+
}
219174

220-
return addr;
175+
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
176+
unsigned long addr, unsigned long len, unsigned long pgoff,
177+
unsigned long flags)
178+
{
179+
return arch_get_unmapped_area_common(filp,
180+
addr, len, pgoff, flags, DOWN);
221181
}
222182

223183
static int mmap_is_legacy(void)

0 commit comments

Comments
 (0)