// SPDX-License-Identifier: GPL-2.0+
/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/elf.h>

static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}
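
/*
 * stack_maxrandom_size() above reports how many bytes the stack top
 * may be randomized by, or zero when randomization is disabled for
 * the task.  As a worked example with assumed numbers: if
 * STACK_RND_MASK were 0x7ff and pages were 4 KB (PAGE_SHIFT == 12),
 * the result would be 0x7ff << 12, i.e. just under 8 MB.  The real
 * mask is defined per architecture in <asm/elf.h>.
 */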

static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}
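
/*
 * mmap_is_legacy() selects the bottom-up layout when the task asks for
 * it (ADDR_COMPAT_LAYOUT personality) or when the stack rlimit is
 * unlimited: with an unbounded stack there is no safe distance below
 * STACK_TOP at which to place a top-down mmap base, so mappings grow
 * upward from TASK_UNMAPPED_BASE instead.
 */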

unsigned long arch_mmap_rnd(void)
{
	return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT;
}
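
/*
 * arch_mmap_rnd() above yields a page-aligned random offset of at most
 * MMAP_RND_MASK pages.  For illustration only: a hypothetical mask of
 * 0x3ff with 4 KB pages would give 10 bits of entropy, i.e. offsets up
 * to just under 4 MB; the actual mask comes from the architecture
 * headers.
 */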

static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}
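
/*
 * In the legacy layout the mmap area starts just above
 * TASK_UNMAPPED_BASE, nudged upward by the random factor, and
 * subsequent allocations grow toward higher addresses.
 */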

static inline unsigned long mmap_base(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
	unsigned long gap_min, gap_max;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~128 MB hole.
	 */
	gap_min = SZ_128M;
	gap_max = (STACK_TOP / 6) * 5;

	if (gap < gap_min)
		gap = gap_min;
	else if (gap > gap_max)
		gap = gap_max;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
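
/*
 * Worked example for mmap_base(), with assumed numbers: a stack rlimit
 * of 8 MB padded by, say, 8 MB of maximum stack randomization plus a
 * 1 MB stack_guard_gap gives gap = 17 MB.  That is below the 128 MB
 * floor, so the gap is raised to SZ_128M and the top-down mmap base
 * lands at PAGE_ALIGN(STACK_TOP - SZ_128M - rnd).  The clamp to 5/6 of
 * STACK_TOP keeps at least a sixth of the address space free below the
 * mmap base for the program image and the brk heap.
 */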

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (offset_in_page(addr))
		return addr;

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}
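
/*
 * For file-backed and shared mappings the search above passes an
 * align_mask/align_offset pair, so vm_unmapped_area() returns an
 * address congruent to the file offset at MMAP_ALIGN_MASK page
 * granularity; anonymous private mappings skip the alignment to avoid
 * fragmenting the address space.  A non-zero offset_in_page(addr)
 * means vm_unmapped_area() returned a negative errno rather than a
 * page-aligned address.
 */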

unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
					     unsigned long len, unsigned long pgoff,
					     unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (offset_in_page(addr))
			return addr;
	}

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}
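
/*
 * The top-down search above scans from just below mm->mmap_base toward
 * PAGE_SIZE; only when that window is exhausted does the bottom-up
 * retry cover TASK_UNMAPPED_BASE up to TASK_SIZE, the range otherwise
 * used by the legacy layout.
 */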

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
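
/*
 * The two layouts chosen above, sketched with illustrative (not
 * architecture-accurate) proportions:
 *
 *   legacy (bottom-up):               flexible (top-down):
 *     stack at STACK_TOP                stack at STACK_TOP
 *     large unused gap                  gap (>= 128 MB, clamped)
 *     mmap grows up from                mmap grows down from
 *       TASK_UNMAPPED_BASE + rnd          STACK_TOP - gap - rnd
 *     brk heap / program image          brk heap / program image
 */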

static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_RO,
	[VM_WRITE]					= PAGE_RO,
	[VM_WRITE | VM_READ]				= PAGE_RO,
	[VM_EXEC]					= PAGE_RX,
	[VM_EXEC | VM_READ]				= PAGE_RX,
	[VM_EXEC | VM_WRITE]				= PAGE_RX,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_RX,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_RO,
	[VM_SHARED | VM_WRITE]				= PAGE_RW,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_RW,
	[VM_SHARED | VM_EXEC]				= PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_RWX,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT
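
/*
 * DECLARE_VM_GET_PAGE_PROT (from <linux/pgtable.h>) emits the generic
 * vm_get_page_prot() helper, which indexes protection_map[] with the
 * VM_READ | VM_WRITE | VM_EXEC | VM_SHARED bits of vm_flags.  Note
 * that private writable mappings map to read-only page protections
 * here; write access is granted later through the copy-on-write fault
 * path.
 */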