From 75349cc7326dd2aa645bf21fe65a40c68b386c29 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan-Simon=20M=C3=B6ller?= <jsmoeller@linuxfoundation.org>
Date: Wed, 28 Jun 2017 00:35:18 +0200
Subject: [PATCH] [SEC][Backport] Fix CVE-2017-1000364 through backport
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fix CVE-2017-1000364 through backport of upstream patches (from 4.9 branch):
- https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git/commit/?h=linux-4.9.y&id=cfc0eb403816c5c4f9667d959de5e22789b5421e
- https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git/commit/?h=linux-4.9.y&id=5d10ad6297260e9b85e7645ee544a6115bb229e4
- https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git/commit/?h=linux-4.9.y&id=ce7fe8595902c3f03ef528c2dc1928b3f4b67fcf

Signed-off-by: Jan-Simon Möller <jsmoeller@linuxfoundation.org>
---
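Backport notes (kept below the "---" separator so git-am drops them):
the backported commits replace the kernel's single, post-factum stack
guard page with a guard gap of stack_guard_gap pages (default 256),
enforced up front through the new vm_start_gap()/vm_end_gap() helpers
wherever a candidate mapping, brk move, or stack expansion is checked
against a neighbouring VMA.

A worked example of the tightened address-hint check (illustration
only, not part of the patch), assuming 4 KiB pages and the default gap:

    stack VMA:  [0x7f0000100000, ...), VM_GROWSDOWN
    vm_start_gap(vma) = 0x7f0000100000 - 256 * 4096 = 0x7f0000000000
    request:    addr = 0x7f00000ff000, len = 4096
    old check:  addr + len <= vma->vm_start     -> succeeds (abuts stack)
    new check:  addr + len <= vm_start_gap(vma) -> fails (inside the gap)

The gap is in page units and tunable at boot; a hypothetical command
line widening it to 1024 pages (4 MiB) would append: stack_guard_gap=1024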
 Documentation/kernel-parameters.txt |   7 ++
 arch/arc/mm/mmap.c                  |   2 +-
 arch/arm/mm/mmap.c                  |   4 +-
 arch/frv/mm/elf-fdpic.c             |   2 +-
 arch/mips/mm/mmap.c                 |   2 +-
 arch/parisc/kernel/sys_parisc.c     |  15 ++--
 arch/powerpc/mm/hugetlbpage-radix.c |   2 +-
 arch/powerpc/mm/mmap.c              |   4 +-
 arch/powerpc/mm/slice.c             |   2 +-
 arch/s390/mm/mmap.c                 |   4 +-
 arch/sh/mm/mmap.c                   |   4 +-
 arch/sparc/kernel/sys_sparc_64.c    |   4 +-
 arch/sparc/mm/hugetlbpage.c         |   2 +-
 arch/tile/mm/hugetlbpage.c          |   2 +-
 arch/x86/kernel/sys_x86_64.c        |   4 +-
 arch/x86/mm/hugetlbpage.c           |   2 +-
 arch/xtensa/kernel/syscall.c        |   2 +-
 fs/hugetlbfs/inode.c                |   2 +-
 fs/proc/task_mmu.c                  |   4 -
 include/linux/mm.h                  |  53 ++++++------
 mm/gup.c                            |   5 --
 mm/memory.c                         |  38 ---------
 mm/mmap.c                           | 158 ++++++++++++++++++++++--------------
 23 files changed, 159 insertions(+), 165 deletions(-)

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 46726d4899fe..c1980b5c6a15 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3852,6 +3852,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        spia_pedr=
        spia_peddr=
 
+       stack_guard_gap=        [MM]
+                       override the default stack gap protection. The value
+                       is in page units and it defines how many pages prior
+                       to (for stacks growing down) resp. after (for stacks
+                       growing up) the main stack are reserved for no other
+                       mapping. Default value is 256 pages.
+
        stacktrace      [FTRACE]
                        Enabled the stack tracer on boot up.
 
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 2e06d56e987b..cf4ae6958240 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -64,7 +64,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 66353caa35b9..641334ebf46d 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -89,7 +89,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
@@ -140,7 +140,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                               (!vma || addr + len <= vma->vm_start))
+                               (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index 836f14707a62..efa59f1f8022 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -74,7 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
                addr = PAGE_ALIGN(addr);
                vma = find_vma(current->mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        goto success;
        }
 
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 353037699512..c5fdea5debe5 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -92,7 +92,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 0a393a04e891..1d7691fa8ab2 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
 {
        struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
+       struct vm_area_struct *vma, *prev;
        unsigned long task_size = TASK_SIZE;
        int do_color_align, last_mmap;
        struct vm_unmapped_area_info info;
@@ -115,9 +115,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                else
                        addr = PAGE_ALIGN(addr);
 
-               vma = find_vma(mm, addr);
+               vma = find_vma_prev(mm, addr, &prev);
                if (task_size - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)) &&
+                   (!prev || addr >= vm_end_gap(prev)))
                        goto found_addr;
        }
 
@@ -141,7 +142,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
 {
-       struct vm_area_struct *vma;
+       struct vm_area_struct *vma, *prev;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int do_color_align, last_mmap;
@@ -175,9 +176,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        addr = COLOR_ALIGN(addr, last_mmap, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
+
+               vma = find_vma_prev(mm, addr, &prev);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)) &&
+                   (!prev || addr >= vm_end_gap(prev)))
                        goto found_addr;
        }
 
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 35254a678456..a2b2d97f7eda 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -65,7 +65,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
        /*
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 2f1e44362198..5bc2845cddf4 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -106,7 +106,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
@@ -142,7 +142,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                               (!vma || addr + len <= vma->vm_start))
+                               (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 2b27458902ee..c4d5c9c61e0f 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
        if ((mm->task_size - len) < addr)
                return 0;
        vma = find_vma(mm, addr);
-       return (!vma || (addr + len) <= vma->vm_start);
+       return (!vma || (addr + len) <= vm_start_gap(vma));
 }
 
 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index eb9df2822da1..812368f274c9 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -98,7 +98,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
@@ -136,7 +136,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                               (!vma || addr + len <= vma->vm_start))
+                               (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 6777177807c2..7df7d5944188 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -63,7 +63,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
@@ -113,7 +113,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index fe8b8ee8e660..02e05e221b94 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -118,7 +118,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
@@ -181,7 +181,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 988acc8b1b80..58cde8d9be8a 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -116,7 +116,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 77ceaa343fce..67508b249ede 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -232,7 +232,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
        if (current->mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 10e0272d789a..136ad7c1ce7b 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -143,7 +143,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (end - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
@@ -186,7 +186,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                               (!vma || addr + len <= vma->vm_start))
+                               (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 2ae8584b44c7..fe342e8ed529 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -144,7 +144,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 83cf49685373..3aaaae18417c 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -87,7 +87,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                /* At this point:  (!vmm || addr < vmm->vm_end). */
                if (TASK_SIZE - len < addr)
                        return -ENOMEM;
-               if (!vmm || addr + len <= vmm->vm_start)
+               if (!vmm || addr + len <= vm_start_gap(vmm))
                        return addr;
                addr = vmm->vm_end;
                if (flags & MAP_SHARED)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 4ea71eba40a5..aac9114728c3 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -191,7 +191,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 2750949397eb..5feada822930 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -309,11 +309,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 
        /* We don't show the stack guard page in /proc/maps */
        start = vma->vm_start;
-       if (stack_guard_page_start(vma, start))
-               start += PAGE_SIZE;
        end = vma->vm_end;
-       if (stack_guard_page_end(vma, end))
-               end -= PAGE_SIZE;
 
        seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 582d30baaa86..31206cc2e99c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1392,39 +1392,11 @@ int clear_page_dirty_for_io(struct page *page);
 
 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
 
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
-{
-       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
 {
        return !vma->vm_ops;
 }
 
-static inline int stack_guard_page_start(struct vm_area_struct *vma,
-                                            unsigned long addr)
-{
-       return (vma->vm_flags & VM_GROWSDOWN) &&
-               (vma->vm_start == addr) &&
-               !vma_growsdown(vma->vm_prev, addr);
-}
-
-/* Is the vma a continuation of the stack vma below it? */
-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
-{
-       return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
-}
-
-static inline int stack_guard_page_end(struct vm_area_struct *vma,
-                                          unsigned long addr)
-{
-       return (vma->vm_flags & VM_GROWSUP) &&
-               (vma->vm_end == addr) &&
-               !vma_growsup(vma->vm_next, addr);
-}
-
 int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
 
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
@@ -2153,6 +2125,7 @@ void page_cache_async_readahead(struct address_space *mapping,
                                pgoff_t offset,
                                unsigned long size);
 
+extern unsigned long stack_guard_gap;
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
 
@@ -2181,6 +2154,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
        return vma;
 }
 
+static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+{
+       unsigned long vm_start = vma->vm_start;
+
+       if (vma->vm_flags & VM_GROWSDOWN) {
+               vm_start -= stack_guard_gap;
+               if (vm_start > vma->vm_start)
+                       vm_start = 0;
+       }
+       return vm_start;
+}
+
+static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+{
+       unsigned long vm_end = vma->vm_end;
+
+       if (vma->vm_flags & VM_GROWSUP) {
+               vm_end += stack_guard_gap;
+               if (vm_end < vma->vm_end)
+                       vm_end = -PAGE_SIZE;
+       }
+       return vm_end;
+}
+
 static inline unsigned long vma_pages(struct vm_area_struct *vma)
 {
        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
diff --git a/mm/gup.c b/mm/gup.c
index 22cc22e7432f..4b3723734623 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -370,11 +370,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
        /* mlock all present pages, but do not fault in new pages */
        if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
                return -ENOENT;
-       /* For mm_populate(), just skip the stack guard page. */
-       if ((*flags & FOLL_POPULATE) &&
-                       (stack_guard_page_start(vma, address) ||
-                        stack_guard_page_end(vma, address + PAGE_SIZE)))
-               return -ENOENT;
        if (*flags & FOLL_WRITE)
                fault_flags |= FAULT_FLAG_WRITE;
        if (*flags & FOLL_REMOTE)
diff --git a/mm/memory.c b/mm/memory.c
index 45f39f391cc2..527ef7841146 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2695,40 +2695,6 @@ out_release:
 }
 
 /*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
-       address &= PAGE_MASK;
-       if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-               struct vm_area_struct *prev = vma->vm_prev;
-
-               /*
-                * Is there a mapping abutting this one below?
-                *
-                * That's only ok if it's the same stack mapping
-                * that has gotten split..
-                */
-               if (prev && prev->vm_end == address)
-                       return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-
-               return expand_downwards(vma, address - PAGE_SIZE);
-       }
-       if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
-               struct vm_area_struct *next = vma->vm_next;
-
-               /* As VM_GROWSDOWN but s/below/above/ */
-               if (next && next->vm_start == address + PAGE_SIZE)
-                       return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
-               return expand_upwards(vma, address + PAGE_SIZE);
-       }
-       return 0;
-}
-
-/*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -2744,10 +2710,6 @@ static int do_anonymous_page(struct fault_env *fe)
        if (vma->vm_flags & VM_SHARED)
                return VM_FAULT_SIGBUS;
 
-       /* Check if we need to add a guard page to the stack */
-       if (check_stack_guard_page(vma, fe->address) < 0)
-               return VM_FAULT_SIGSEGV;
-
        /*
         * Use pte_alloc() instead of pte_alloc_map().  We can't run
         * pte_offset_map() on pmds where a huge pmd might be created
diff --git a/mm/mmap.c b/mm/mmap.c
index f3ebc5a54367..e1c1eb4b3942 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -176,6 +176,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
        unsigned long retval;
        unsigned long newbrk, oldbrk;
        struct mm_struct *mm = current->mm;
+       struct vm_area_struct *next;
        unsigned long min_brk;
        bool populate;
 
@@ -221,7 +222,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
        }
 
        /* Check against existing mmap mappings. */
-       if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+       next = find_vma(mm, oldbrk);
+       if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
                goto out;
 
        /* Ok, looks good - let it rip. */
@@ -244,10 +246,22 @@ out:
 
 static long vma_compute_subtree_gap(struct vm_area_struct *vma)
 {
-       unsigned long max, subtree_gap;
-       max = vma->vm_start;
-       if (vma->vm_prev)
-               max -= vma->vm_prev->vm_end;
+       unsigned long max, prev_end, subtree_gap;
+
+       /*
+        * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
+        * allow two stack_guard_gaps between them here, and when choosing
+        * an unmapped area; whereas when expanding we only require one.
+        * That's a little inconsistent, but keeps the code here simpler.
+        */
+       max = vm_start_gap(vma);
+       if (vma->vm_prev) {
+               prev_end = vm_end_gap(vma->vm_prev);
+               if (max > prev_end)
+                       max -= prev_end;
+               else
+                       max = 0;
+       }
        if (vma->vm_rb.rb_left) {
                subtree_gap = rb_entry(vma->vm_rb.rb_left,
                                struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -343,7 +357,7 @@ static void validate_mm(struct mm_struct *mm)
                        anon_vma_unlock_read(anon_vma);
                }
 
-               highest_address = vma->vm_end;
+               highest_address = vm_end_gap(vma);
                vma = vma->vm_next;
                i++;
        }
@@ -512,7 +526,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
        if (vma->vm_next)
                vma_gap_update(vma->vm_next);
        else
-               mm->highest_vm_end = vma->vm_end;
+               mm->highest_vm_end = vm_end_gap(vma);
 
        /*
         * vma->vm_prev wasn't known when we followed the rbtree to find the
@@ -765,7 +779,7 @@ again:
                        vma_gap_update(vma);
                if (end_changed) {
                        if (!next)
-                               mm->highest_vm_end = end;
+                               mm->highest_vm_end = vm_end_gap(vma);
                        else if (!adjust_next)
                                vma_gap_update(next);
                }
@@ -1630,7 +1644,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 
        while (true) {
                /* Visit left subtree if it looks promising */
-               gap_end = vma->vm_start;
+               gap_end = vm_start_gap(vma);
                if (gap_end >= low_limit && vma->vm_rb.rb_left) {
                        struct vm_area_struct *left =
                                rb_entry(vma->vm_rb.rb_left,
@@ -1641,12 +1655,13 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
                        }
                }
 
-               gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+               gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
 check_current:
                /* Check if current node has a suitable gap */
                if (gap_start > high_limit)
                        return -ENOMEM;
-               if (gap_end >= low_limit && gap_end - gap_start >= length)
+               if (gap_end >= low_limit &&
+                   gap_end > gap_start && gap_end - gap_start >= length)
                        goto found;
 
                /* Visit right subtree if it looks promising */
@@ -1668,8 +1683,8 @@ check_current:
                        vma = rb_entry(rb_parent(prev),
                                       struct vm_area_struct, vm_rb);
                        if (prev == vma->vm_rb.rb_left) {
-                               gap_start = vma->vm_prev->vm_end;
-                               gap_end = vma->vm_start;
+                               gap_start = vm_end_gap(vma->vm_prev);
+                               gap_end = vm_start_gap(vma);
                                goto check_current;
                        }
                }
@@ -1733,7 +1748,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
        while (true) {
                /* Visit right subtree if it looks promising */
-               gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+               gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
                if (gap_start <= high_limit && vma->vm_rb.rb_right) {
                        struct vm_area_struct *right =
                                rb_entry(vma->vm_rb.rb_right,
@@ -1746,10 +1761,11 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
 check_current:
                /* Check if current node has a suitable gap */
-               gap_end = vma->vm_start;
+               gap_end = vm_start_gap(vma);
                if (gap_end < low_limit)
                        return -ENOMEM;
-               if (gap_start <= high_limit && gap_end - gap_start >= length)
+               if (gap_start <= high_limit &&
+                   gap_end > gap_start && gap_end - gap_start >= length)
                        goto found;
 
                /* Visit left subtree if it looks promising */
@@ -1772,7 +1788,7 @@ check_current:
                                       struct vm_area_struct, vm_rb);
                        if (prev == vma->vm_rb.rb_right) {
                                gap_start = vma->vm_prev ?
-                                       vma->vm_prev->vm_end : 0;
+                                       vm_end_gap(vma->vm_prev) : 0;
                                goto check_current;
                        }
                }
@@ -1810,7 +1826,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
 {
        struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
+       struct vm_area_struct *vma, *prev;
        struct vm_unmapped_area_info info;
 
        if (len > TASK_SIZE - mmap_min_addr)
@@ -1821,9 +1837,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
        if (addr) {
                addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
+               vma = find_vma_prev(mm, addr, &prev);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)) &&
+                   (!prev || addr >= vm_end_gap(prev)))
                        return addr;
        }
 
@@ -1846,7 +1863,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
 {
-       struct vm_area_struct *vma;
+       struct vm_area_struct *vma, *prev;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;
@@ -1861,9 +1878,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
+               vma = find_vma_prev(mm, addr, &prev);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                               (!vma || addr + len <= vma->vm_start))
+                               (!vma || addr + len <= vm_start_gap(vma)) &&
+                               (!prev || addr >= vm_end_gap(prev)))
                        return addr;
        }
 
@@ -1998,21 +2016,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
  * update accounting. This is shared with both the
  * grow-up and grow-down cases.
  */
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma,
+                            unsigned long size, unsigned long grow)
 {
        struct mm_struct *mm = vma->vm_mm;
        struct rlimit *rlim = current->signal->rlim;
-       unsigned long new_start, actual_size;
+       unsigned long new_start;
 
        /* address space limit tests */
        if (!may_expand_vm(mm, vma->vm_flags, grow))
                return -ENOMEM;
 
        /* Stack limit test */
-       actual_size = size;
-       if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
-               actual_size -= PAGE_SIZE;
-       if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+       if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
                return -ENOMEM;
 
        /* mlock limit tests */
@@ -2050,16 +2066,32 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
        struct mm_struct *mm = vma->vm_mm;
+       struct vm_area_struct *next;
+       unsigned long gap_addr;
        int error = 0;
 
        if (!(vma->vm_flags & VM_GROWSUP))
                return -EFAULT;
 
-       /* Guard against wrapping around to address 0. */
-       if (address < PAGE_ALIGN(address+4))
-               address = PAGE_ALIGN(address+4);
-       else
+       /* Guard against exceeding limits of the address space. */
+       address &= PAGE_MASK;
+       if (address >= TASK_SIZE)
                return -ENOMEM;
+       address += PAGE_SIZE;
+
+       /* Enforce stack_guard_gap */
+       gap_addr = address + stack_guard_gap;
+
+       /* Guard against overflow */
+       if (gap_addr < address || gap_addr > TASK_SIZE)
+               gap_addr = TASK_SIZE;
+
+       next = vma->vm_next;
+       if (next && next->vm_start < gap_addr) {
+               if (!(next->vm_flags & VM_GROWSUP))
+                       return -ENOMEM;
+               /* Check that both stack segments have the same anon_vma? */
+       }
 
        /* We must make sure the anon_vma is allocated. */
        if (unlikely(anon_vma_prepare(vma)))
@@ -2104,7 +2136,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                                if (vma->vm_next)
                                        vma_gap_update(vma->vm_next);
                                else
-                                       mm->highest_vm_end = address;
+                                       mm->highest_vm_end = vm_end_gap(vma);
                                spin_unlock(&mm->page_table_lock);
 
                                perf_event_mmap(vma);
@@ -2125,6 +2157,8 @@ int expand_downwards(struct vm_area_struct *vma,
                                   unsigned long address)
 {
        struct mm_struct *mm = vma->vm_mm;
+       struct vm_area_struct *prev;
+       unsigned long gap_addr;
        int error;
 
        address &= PAGE_MASK;
@@ -2132,6 +2166,17 @@ int expand_downwards(struct vm_area_struct *vma,
        if (error)
                return error;
 
+       /* Enforce stack_guard_gap */
+       gap_addr = address - stack_guard_gap;
+       if (gap_addr > address)
+               return -ENOMEM;
+       prev = vma->vm_prev;
+       if (prev && prev->vm_end > gap_addr) {
+               if (!(prev->vm_flags & VM_GROWSDOWN))
+                       return -ENOMEM;
+               /* Check that both stack segments have the same anon_vma? */
+       }
+
        /* We must make sure the anon_vma is allocated. */
        if (unlikely(anon_vma_prepare(vma)))
                return -ENOMEM;
@@ -2186,28 +2231,25 @@ int expand_downwards(struct vm_area_struct *vma,
        return error;
 }
 
-/*
- * Note how expand_stack() refuses to expand the stack all the way to
- * abut the next virtual mapping, *unless* that mapping itself is also
- * a stack mapping. We want to leave room for a guard page, after all
- * (the guard page itself is not added here, that is done by the
- * actual page faulting logic)
- *
- * This matches the behavior of the guard page logic (see mm/memory.c:
- * check_stack_guard_page()), which only allows the guard page to be
- * removed under these circumstances.
- */
+/* enforced gap between the expanding stack and other mappings. */
+unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+
+static int __init cmdline_parse_stack_guard_gap(char *p)
+{
+       unsigned long val;
+       char *endptr;
+
+       val = simple_strtoul(p, &endptr, 10);
+       if (!*endptr)
+               stack_guard_gap = val << PAGE_SHIFT;
+
+       return 0;
+}
+__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
+
 #ifdef CONFIG_STACK_GROWSUP
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-       struct vm_area_struct *next;
-
-       address &= PAGE_MASK;
-       next = vma->vm_next;
-       if (next && next->vm_start == address + PAGE_SIZE) {
-               if (!(next->vm_flags & VM_GROWSUP))
-                       return -ENOMEM;
-       }
        return expand_upwards(vma, address);
 }
 
@@ -2229,14 +2271,6 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 #else
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-       struct vm_area_struct *prev;
-
-       address &= PAGE_MASK;
-       prev = vma->vm_prev;
-       if (prev && prev->vm_end == address) {
-               if (!(prev->vm_flags & VM_GROWSDOWN))
-                       return -ENOMEM;
-       }
        return expand_downwards(vma, address);
 }
 
@@ -2334,7 +2368,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
                vma->vm_prev = prev;
                vma_gap_update(vma);
        } else
-               mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
+               mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
        tail_vma->vm_next = NULL;
 
        /* Kill the cache */
-- 
2.12.3