// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
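
/*
 * Worked example (illustrative only, assuming PAGE_SHIFT == 12 and
 * SHMLBA == 0x4000, i.e. four 4K pages):
 *
 *	COLOUR_ALIGN(0x40001000, 3)
 *		= ((0x40001000 + 0x3fff) & ~0x3fff) + ((3 << 12) & 0x3fff)
 *		= 0x40004000 + 0x3000
 *		= 0x40007000
 *
 * The result is an address at or above addr whose offset within an
 * SHMLBA-sized window matches that of page pgoff, so every mapping of
 * that page lands on the same cache colour.
 */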

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
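	/*
	 * Note that do_align is set for any file-backed mapping, not
	 * just MAP_SHARED ones: pages of a file may be mapped by
	 * several processes at once, so they too must agree on a
	 * cache colour.
	 */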
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
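		/*
		 * A fixed address is not ours to choose; all we can do
		 * is verify that a shared mapping keeps addr congruent
		 * to pgoff << PAGE_SHIFT modulo SHMLBA, i.e. that it
		 * sits on the cache colour the file offset demands.
		 */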
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
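		/*
		 * Honour the hint if the aligned region fits below
		 * TASK_SIZE and does not collide with the next VMA;
		 * vm_start_gap() also keeps us out of the stack guard
		 * gap below a downward-growing stack.
		 */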
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
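	/*
	 * align_mask keeps only the colour bits between PAGE_SIZE and
	 * SHMLBA; align_offset supplies the phase, so the address
	 * chosen is congruent to pgoff << PAGE_SHIFT within SHMLBA.
	 */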
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
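
/*
 * Minimal userspace sketch of what exercises the function above
 * (illustrative only; the file name and sizes are hypothetical).  On
 * a VIPT-aliasing CPU both returned addresses share one cache colour,
 * because both mappings use pgoff == 0:
 *
 *	int fd = open("/tmp/f", O_RDWR);
 *	void *a = mmap(NULL, 16384, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	void *b = mmap(NULL, 16384, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	// ((unsigned long)a & (SHMLBA - 1)) ==
 *	//			((unsigned long)b & (SHMLBA - 1))
 */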

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

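	/*
	 * Search top-down: look for a gap between FIRST_USER_ADDRESS
	 * and the mmap base, preferring the highest address available
	 * so the area below mmap_base fills downwards.
	 */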
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
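	/*
	 * On success vm_unmapped_area() returns a page-aligned
	 * address; on failure it returns a (negative) errno, which is
	 * never page-aligned, hence the test below.
	 */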
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
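	/*
	 * Accept only ranges that lie entirely inside lowmem, i.e.
	 * within [PHYS_OFFSET, __pa(high_memory - 1) + 1); anything
	 * below the start of RAM or beyond the directly-mapped region
	 * is rejected.
	 */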
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
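	/*
	 * The last valid pfn is PHYS_MASK >> PAGE_SHIFT, so the
	 * mapping is acceptable when its end pfn is at most one past
	 * that (the end bound is exclusive).
	 */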
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}