/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
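
/*
 * Worked example (an illustration, assuming SHMLBA == 4 * PAGE_SIZE == 0x4000
 * and 4K pages, as on ARM): COLOUR_ALIGN(0x40001000, 3) first rounds the
 * address up to the next SHMLBA boundary, 0x40004000, then adds the colour
 * offset of page 3 within a 16K window, (3 << 12) & 0x3fff == 0x3000, giving
 * 0x40007000.  The result therefore shares a cache colour with pgoff 3 of
 * the underlying object.
 */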

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a given page of an object
 * must always be mapped at an address that is a multiple of SHMLBA
 * bytes away from any other mapping of the same page.
 *
 * We provide this function unconditionally; in the VIVT case the
 * alignment rules are optimised out.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#ifdef CONFIG_CPU_V6
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by the P bits, bits 11 and
	 * 23, of the cache type register.  The comparison with the main
	 * ID register below guards against CPUs whose cache type
	 * register is not implemented and reads back as the ID register;
	 * the shift folds the D-cache P bit (bit 23) down onto bit 11
	 * so that a single mask tests both.
	 */
	cache_type = read_cpuid(CPUID_CACHETYPE);
	if (cache_type != read_cpuid(CPUID_ID)) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
#else
#define do_align 0
#define aliasing 0
#endif

	/*
	 * Honour MAP_FIXED requests, but reject shared mappings whose
	 * address would break SHMLBA colouring on an aliasing cache.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
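	/*
	 * free_area_cache remembers where the previous search finished and
	 * cached_hole_size is the largest hole already skipped below it.
	 * If the request cannot fit in any of those skipped holes, resume
	 * from the cached address; otherwise restart from
	 * TASK_UNMAPPED_BASE so a suitable earlier hole is not missed.
	 */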
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
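
/*
 * Illustrative userspace sketch (not part of this file): with the colouring
 * above, two MAP_SHARED mappings of the same file offset are handed back at
 * addresses with the same colour modulo SHMLBA, so they use the same cache
 * lines even on an aliasing VIPT cache:
 *
 *	int fd = open("/tmp/f", O_RDWR);
 *	char *a = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	char *b = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * Both returned addresses satisfy addr % SHMLBA == (pgoff << PAGE_SHIFT) %
 * SHMLBA (here 0), so stores through 'a' are seen through 'b' without extra
 * cache maintenance.
 */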

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
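	/*
	 * high_memory marks the end of the kernel's lowmem (direct) mapping;
	 * reads and writes beyond its physical address are refused.
	 */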
	if (addr + size > __pa(high_memory))
		return 0;

	return 1;
}

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
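	/*
	 * PFN 0x00100000 is the 4GB boundary with 4K pages; ranges
	 * extending past it would need supersections and are rejected.
	 */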
	return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}