/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
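/*
 * Illustrative example (added for clarity, not in the original source):
 * on ARM, SHMLBA is 4 * PAGE_SIZE, so with 4KiB pages each alias set
 * spans 16KiB.  COLOUR_ALIGN(0x12345000, 1) rounds 0x12345000 up to
 * 0x12348000 and adds (1 << 12) & 0x3fff = 0x1000, yielding 0x12349000:
 * an address whose offset within the 16KiB alias set matches that of
 * file page 1, so both map to the same cache colour.
 */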
/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
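/*
 * Illustrative sketch (added for clarity, not in the original source):
 * in the top-down layout, mmap_base() leaves a stack-sized gap, clamped
 * to [MIN_GAP, MAX_GAP], below the top of the address space.  Assuming
 * a 3GiB TASK_SIZE and the common 8MiB stack rlimit, the gap is clamped
 * up to 128MiB, so mappings grow downwards from roughly
 * TASK_SIZE - 128MiB - rnd.
 */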
/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases; however,
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
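/*
 * Note (added for clarity, not in the original source): as used above,
 * vm_unmapped_area() returns an address whose bits under align_mask
 * match those of align_offset.  With align_mask = PAGE_MASK & (SHMLBA - 1),
 * only the colour bits between PAGE_SIZE and SHMLBA are constrained, so
 * the returned address shares the cache colour of pgoff << PAGE_SHIFT
 * without being over-aligned to full SHMLBA multiples.
 */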
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

	rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
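/*
 * Illustrative note (added for clarity, not in the original source):
 * mmap_rnd_bits is set via CONFIG_ARCH_MMAP_RND_BITS.  Assuming the
 * common value of 8 with 4KiB pages, the random factor above is up to
 * 255 pages, i.e. the mmap base is randomised within a window of just
 * under 1MiB.
 */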
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif
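/*
 * Illustrative example (added for clarity, not in the original source):
 * with CONFIG_STRICT_DEVMEM enabled, a pfn backing system RAM, or any
 * region a driver has claimed with IORESOURCE_EXCLUSIVE, is refused
 * (devmem_is_allowed() returns 0), while an unclaimed MMIO pfn remains
 * mappable.  This RAM-versus-MMIO split is what effectively disables
 * plain read()/write() of memory through /dev/mem.
 */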