1*caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 21d18c47cSCatalin Marinas /* 31d18c47cSCatalin Marinas * Based on arch/arm/mm/mmap.c 41d18c47cSCatalin Marinas * 51d18c47cSCatalin Marinas * Copyright (C) 2012 ARM Ltd. 61d18c47cSCatalin Marinas */ 71d18c47cSCatalin Marinas 81d18c47cSCatalin Marinas #include <linux/elf.h> 91d18c47cSCatalin Marinas #include <linux/fs.h> 101151f838SArd Biesheuvel #include <linux/memblock.h> 111d18c47cSCatalin Marinas #include <linux/mm.h> 121d18c47cSCatalin Marinas #include <linux/mman.h> 131d18c47cSCatalin Marinas #include <linux/export.h> 141d18c47cSCatalin Marinas #include <linux/shm.h> 153f07c014SIngo Molnar #include <linux/sched/signal.h> 1601042607SIngo Molnar #include <linux/sched/mm.h> 171d18c47cSCatalin Marinas #include <linux/io.h> 181d18c47cSCatalin Marinas #include <linux/personality.h> 191d18c47cSCatalin Marinas #include <linux/random.h> 201d18c47cSCatalin Marinas 211d18c47cSCatalin Marinas #include <asm/cputype.h> 221d18c47cSCatalin Marinas 231d18c47cSCatalin Marinas /* 241d18c47cSCatalin Marinas * Leave enough space between the mmap area and the stack to honour ulimit in 251d18c47cSCatalin Marinas * the face of randomisation. 
 */
#define MIN_GAP	(SZ_128M)
#define MAX_GAP	(STACK_TOP/6*5)

/*
 * Return non-zero when this process should use the legacy (bottom-up)
 * mmap layout: the ADDR_COMPAT_LAYOUT personality bit is set, the stack
 * rlimit is unlimited (the stack may grow arbitrarily far down, so a
 * top-down mmap base cannot be placed safely), or the legacy layout was
 * selected via the legacy_va_layout sysctl.
 */
static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Generate the page-aligned ASLR offset applied to the mmap base.
 * 32-bit (compat) tasks are masked with mmap_rnd_compat_bits; 64-bit
 * tasks with mmap_rnd_bits.  The entropy is shifted by PAGE_SHIFT so
 * the returned value is in bytes, page aligned.
 */
unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_COMPAT
	if (test_thread_flag(TIF_32BIT))
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
	return rnd << PAGE_SHIFT;
}

/*
 * Compute the mmap base for the top-down layout: STACK_TOP minus a gap
 * reserved for stack growth (clamped to [MIN_GAP, MAX_GAP]) minus the
 * ASLR offset.  The gap is padded with the maximum stack randomisation
 * plus the stack guard gap so the ulimit is honoured even after the
 * stack itself has been randomised.
 */
static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

/*
 * This function, called very early during the creation of a new process VM
 * image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	/* Only randomise the base when the task opted into ASLR. */
	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality bit is set, or
	 * if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

/*
 * You really shouldn't be using read() or write() on /dev/mem. This might go
 * away in the future.
981d18c47cSCatalin Marinas */ 99097cbd8dSMin-Hua Chen int valid_phys_addr_range(phys_addr_t addr, size_t size) 1001d18c47cSCatalin Marinas { 1011151f838SArd Biesheuvel /* 1021151f838SArd Biesheuvel * Check whether addr is covered by a memory region without the 1031151f838SArd Biesheuvel * MEMBLOCK_NOMAP attribute, and whether that region covers the 1041151f838SArd Biesheuvel * entire range. In theory, this could lead to false negatives 1051151f838SArd Biesheuvel * if the range is covered by distinct but adjacent memory regions 1061151f838SArd Biesheuvel * that only differ in other attributes. However, few of such 1071151f838SArd Biesheuvel * attributes have been defined, and it is debatable whether it 1081151f838SArd Biesheuvel * follows that /dev/mem read() calls should be able traverse 1091151f838SArd Biesheuvel * such boundaries. 1101151f838SArd Biesheuvel */ 1111151f838SArd Biesheuvel return memblock_is_region_memory(addr, size) && 1121151f838SArd Biesheuvel memblock_is_map_memory(addr); 1131d18c47cSCatalin Marinas } 1141d18c47cSCatalin Marinas 1151d18c47cSCatalin Marinas /* 1161d18c47cSCatalin Marinas * Do not allow /dev/mem mappings beyond the supported physical range. 1171d18c47cSCatalin Marinas */ 1181d18c47cSCatalin Marinas int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) 1191d18c47cSCatalin Marinas { 1201d18c47cSCatalin Marinas return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK); 1211d18c47cSCatalin Marinas } 1221d18c47cSCatalin Marinas 1231d18c47cSCatalin Marinas #ifdef CONFIG_STRICT_DEVMEM 1241d18c47cSCatalin Marinas 1251d18c47cSCatalin Marinas #include <linux/ioport.h> 1261d18c47cSCatalin Marinas 1271d18c47cSCatalin Marinas /* 1281d18c47cSCatalin Marinas * devmem_is_allowed() checks to see if /dev/mem access to a certain address 1291d18c47cSCatalin Marinas * is valid. The argument is a physical page number. We mimic x86 here by 1301d18c47cSCatalin Marinas * disallowing access to system RAM as well as device-exclusive MMIO regions. 
1311d18c47cSCatalin Marinas * This effectively disable read()/write() on /dev/mem. 1321d18c47cSCatalin Marinas */ 1331d18c47cSCatalin Marinas int devmem_is_allowed(unsigned long pfn) 1341d18c47cSCatalin Marinas { 1351d18c47cSCatalin Marinas if (iomem_is_exclusive(pfn << PAGE_SHIFT)) 1361d18c47cSCatalin Marinas return 0; 1371d18c47cSCatalin Marinas if (!page_is_ram(pfn)) 1381d18c47cSCatalin Marinas return 1; 1391d18c47cSCatalin Marinas return 0; 1401d18c47cSCatalin Marinas } 1411d18c47cSCatalin Marinas 1421d18c47cSCatalin Marinas #endif 143