|
25 | 25 | #include <linux/random.h> |
26 | 26 | #include <linux/compat.h> |
27 | 27 |
|
/*
 * Construct an artificial page offset for the mapping based on the physical
 * address of the kernel file mapping variable (filp->f_mapping).
 *
 * The result is already reduced to the cache-color page range
 * ((SHM_COLOUR-1) >> PAGE_SHIFT), so distinct struct files tend to land in
 * distinct cache colors.  A NULL filp (anonymous mapping) yields offset 0.
 */
#define GET_FILP_PGOFF(filp)		\
	(filp ? (((unsigned long) filp->f_mapping) >> 8)	\
		 & ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL)
39 | 35 |
|
40 | | -static unsigned long shared_align_offset(unsigned int last_mmap, |
| 36 | +static unsigned long shared_align_offset(unsigned long filp_pgoff, |
41 | 37 | unsigned long pgoff) |
42 | 38 | { |
43 | | - return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT; |
| 39 | + return (filp_pgoff + pgoff) << PAGE_SHIFT; |
44 | 40 | } |
45 | 41 |
|
46 | 42 | static inline unsigned long COLOR_ALIGN(unsigned long addr, |
47 | | - unsigned int last_mmap, unsigned long pgoff) |
| 43 | + unsigned long filp_pgoff, unsigned long pgoff) |
48 | 44 | { |
49 | 45 | unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1); |
50 | 46 | unsigned long off = (SHM_COLOUR-1) & |
51 | | - (shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT); |
52 | | - |
| 47 | + shared_align_offset(filp_pgoff, pgoff); |
53 | 48 | return base + off; |
54 | 49 | } |
55 | 50 |
|
@@ -98,126 +93,91 @@ static unsigned long mmap_upper_limit(struct rlimit *rlim_stack) |
98 | 93 | return PAGE_ALIGN(STACK_TOP - stack_base); |
99 | 94 | } |
100 | 95 |
|
/* Search direction for the unmapped-area lookup: bottom-up or top-down. */
enum mmap_allocation_direction {UP, DOWN};
101 | 97 |
|
/*
 * Common worker for arch_get_unmapped_area() and
 * arch_get_unmapped_area_topdown().
 *
 * Finds a free address range of @len bytes for a new mapping, honoring the
 * SHM_COLOUR cache-color alignment required for shared mappings on this
 * architecture.  @dir selects bottom-up (UP) or top-down (DOWN) search.
 *
 * Returns the chosen address, or -ENOMEM / -EINVAL on failure.
 */
static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	unsigned long filp_pgoff;
	int do_color_align;
	struct vm_unmapped_area_info info;

	/* Requested length too big for the entire address space. */
	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	/* File-backed and shared mappings must be cache-color aligned. */
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	filp_pgoff = GET_FILP_PGOFF(filp);

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/* A fixed shared mapping must itself satisfy the color
		 * alignment the kernel would otherwise have chosen. */
		if ((flags & MAP_SHARED) && filp &&
		    (addr - shared_align_offset(filp_pgoff, pgoff))
				& (SHM_COLOUR - 1))
			return -EINVAL;
		return addr;
	}

	/* Requesting a specific address: honor it if the range is free. */
	if (addr) {
		if (do_color_align)
			addr = COLOR_ALIGN(addr, filp_pgoff, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma_prev(mm, addr, &prev);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			return addr;
	}

	/* Alignment fields are shared by the top-down attempt and the
	 * bottom-up fallback below; set them once. */
	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
	info.align_offset = shared_align_offset(filp_pgoff, pgoff);

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);
		/* Page-aligned result means success; errors are non-aligned
		 * negative values. */
		if (!(addr & ~PAGE_MASK))
			return addr;
		VM_BUG_ON(addr != -ENOMEM);

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_legacy_base;
	info.high_limit = mmap_upper_limit(NULL);
	return vm_unmapped_area(&info);
}
154 | 167 |
|
/*
 * Bottom-up unmapped-area lookup; thin wrapper around
 * arch_get_unmapped_area_common() with direction UP.
 */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr, len, pgoff, flags, UP);
}
219 | 174 |
|
/*
 * Top-down unmapped-area lookup; thin wrapper around
 * arch_get_unmapped_area_common() with direction DOWN (which itself falls
 * back to a bottom-up search if the top-down attempt fails).
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr, len, pgoff, flags, DOWN);
}
222 | 182 |
|
223 | 183 | static int mmap_is_legacy(void) |
|
0 commit comments