xref: /openbmc/linux/arch/arm/mm/mmap.c (revision e190bfe5)
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <asm/cputype.h>
#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
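
/*
 * Worked example (illustrative numbers only; assumes SHMLBA = 0x4000,
 * ARM's conventional four-page value, giving four 4 KiB page colours,
 * and PAGE_SHIFT = 12):
 *
 *	COLOUR_ALIGN(0x8004, 3)
 *	    = ((0x8004 + 0x3fff) & ~0x3fff) + ((3 << 12) & 0x3fff)
 *	    = 0xc000 + 0x3000
 *	    = 0xf000
 *
 * The address is rounded up to the next SHMLBA boundary, then offset
 * so that (addr - (pgoff << PAGE_SHIFT)) remains a multiple of SHMLBA,
 * giving the page the cache colour its file offset demands.
 */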

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a specific page of an
 * object must always be mapped at an address that is a multiple of
 * SHMLBA bytes away from any other mapping of that page.
 *
 * We provide this function unconditionally; in the VIVT case the
 * alignment rules are optimised away at compile time.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#ifdef CONFIG_CPU_V6
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by bits 11 and 23 (the
	 * Isize and Dsize 'P' bits) of the cache type register.
	 */
	cache_type = read_cpuid_cachetype();
	if (cache_type != read_cpuid_id()) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
#else
#define do_align 0
#define aliasing 0
#endif
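
/*
 * A note on the bit-twiddling above (based on the ARMv6 cache type
 * register layout, an assumption the file itself does not spell out):
 * the Isize fields live in bits [11:0] and the Dsize fields in bits
 * [23:12], each with its aliasing 'P' bit at the top.  OR-ing
 * cache_type with itself shifted right by 12 folds the Dsize P bit
 * (bit 23) onto the Isize P bit (bit 11), so one test of bit 11
 * covers both caches.  The cache_type != read_cpuid_id() guard is
 * understood to skip CPUs without a cache type register, where the
 * read returns the main ID register value instead.
 */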

	/*
	 * We enforce the MAP_FIXED case: the address is taken as given,
	 * but on an aliasing cache a shared mapping must still have the
	 * colour its file offset requires.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}
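
	/*
	 * Illustrative numbers (not from this file): with SHMLBA =
	 * 0x4000, a MAP_SHARED|MAP_FIXED request for addr = 0xa000,
	 * pgoff = 1 gives (0xa000 - 0x1000) & 0x3fff = 0x1000, which is
	 * non-zero, so it is rejected with -EINVAL: page 1 of the
	 * object would get a different colour here than elsewhere.
	 */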

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
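	/*
	 * First-fit search with the free_area_cache heuristic: when no
	 * hole below the cached position can fit the request (len
	 * exceeds cached_hole_size, the largest hole seen down there),
	 * resume from free_area_cache; otherwise restart from
	 * TASK_UNMAPPED_BASE so a smaller hole may be reused.
	 */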
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}
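
	/*
	 * Hypothetical walk of the loop below (addresses invented for
	 * illustration): with TASK_UNMAPPED_BASE = 0x40000000 and VMAs
	 * at [0x40000000, 0x40008000) and [0x4000c000, 0x40010000), a
	 * 32 KiB request collides with the first VMA, skips to
	 * 0x40008000, finds the 16 KiB hole there too small (recording
	 * it in cached_hole_size), skips past the second VMA and
	 * returns 0x40010000, leaving free_area_cache = 0x40018000.
	 */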

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
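
/*
 * For illustration only: a userspace sketch, not part of this file.
 * On an aliasing-cache system, every MAP_SHARED mapping of a given
 * file page comes back colour-aligned against its file offset, so two
 * mappings of the same page share a cache colour.  Assumes a POSIX
 * environment and an fd referring to a file at least two pages long;
 * kept inside #if 0 so this file still compiles.
 */
#if 0
#include <stdio.h>
#include <sys/mman.h>
#include <sys/shm.h>	/* SHMLBA */
#include <unistd.h>

static void show_colours(int fd)
{
	long page = sysconf(_SC_PAGESIZE);
	/* Two shared mappings of page 1 of the same file. */
	void *a = mmap(NULL, page, PROT_READ, MAP_SHARED, fd, page);
	void *b = mmap(NULL, page, PROT_READ, MAP_SHARED, fd, page);

	if (a == MAP_FAILED || b == MAP_FAILED)
		return;
	/* Both offsets within an SHMLBA window come out equal. */
	printf("colour a=%#lx b=%#lx\n",
	       (unsigned long)a & (SHMLBA - 1),
	       (unsigned long)b & (SHMLBA - 1));
	munmap(a, page);
	munmap(b, page);
}
#endif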

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}
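
/*
 * A note on the upper bound above (the presumed rationale; the file
 * itself does not say): the valid range is
 * [PHYS_OFFSET, __pa(high_memory)), but high_memory is the first byte
 * beyond lowmem and may sit outside the linear mapping that __pa() is
 * defined over, so the bound is computed from the last mapped byte as
 * __pa(high_memory - 1) + 1.
 */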

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map memory above the 4 GiB barrier into
 * userspace: only the first 0x00100000 page frames are mappable
 * (0x00100000 * 4 KiB pages = 4 GiB).
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}
143