xref: /openbmc/linux/arch/arm/mm/mmap.c (revision 29a36d4d)
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cputype.h>
#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

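/*
 * Worked example (illustrative, not part of the original source):
 * with SHMLBA = 16KB and PAGE_SHIFT = 12,
 *
 *	COLOUR_ALIGN(0x40001234, 0x3)
 *		= ((0x40001234 + 0x3fff) & ~0x3fff) + ((0x3 << 12) & 0x3fff)
 *		= 0x40004000 + 0x3000
 *		= 0x40007000
 *
 * The address is rounded up to an SHMLBA boundary and then offset by
 * the page's colour, so page 3 of an object lands on the same cache
 * colour in every mapping.
 */
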
/*
 * We need to ensure that shared mappings are correctly aligned, to
 * avoid aliasing issues with VIPT caches: a specific page of an
 * object must always be mapped at an SHMLBA boundary plus that
 * page's colour offset.
 *
 * We provide this function unconditionally; in the VIVT case the
 * alignment rules are optimised away.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by bits 11 and 23 of the
	 * cache type register.
	 */
	cache_type = read_cpuid_cachetype();
	if (cache_type != read_cpuid_id()) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
#else
#define do_align 0
#define aliasing 0
#endif
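
	/*
	 * Illustrative bit math (a sketch of the test above): on ARMv6
	 * the cache type register keeps the D-cache description in
	 * bits [23:12] and the I-cache description in bits [11:0],
	 * with bit 11 of each field acting as the P (page colouring)
	 * bit.  Shifting right by 12 overlays the D field onto the I
	 * field, so a single test of bit 11 in
	 * (cache_type | cache_type >> 12) catches aliasing in either
	 * cache.
	 */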

	/*
	 * We enforce the MAP_FIXED case: even a fixed shared mapping
	 * must land on a colour-correct address when the caches alias.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}
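
	/*
	 * Worked example (illustrative, assuming SHMLBA = 16KB): a
	 * MAP_SHARED | MAP_FIXED request at addr 0x40001000 with
	 * pgoff 0 fails with -EINVAL on an aliasing cache, because
	 * 0x40001000 & (SHMLBA - 1) is nonzero; addr 0x40004000
	 * would be accepted.
	 */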

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}
	/* 8 bits of randomness in 20 address space bits */
	if ((current->flags & PF_RANDOMIZE) &&
	    !(current->personality & ADDR_NO_RANDOMIZE))
		addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
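	/*
	 * Illustrative magnitude, assuming PAGE_SHIFT == 12: the
	 * randomisation above nudges the search start by 0..255
	 * pages, i.e. by up to roughly 1MB.
	 */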

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
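
/*
 * Illustrative userspace check (not kernel code): on an aliasing VIPT
 * system with SHMLBA = 16KB, two MAP_SHARED mappings of the same file
 * offset may get different addresses but always the same cache colour.
 * A minimal sketch, assuming a scratch file path:
 *
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/tmp/shmlba-test", O_RDWR | O_CREAT, 0600);
 *		void *a, *b;
 *
 *		ftruncate(fd, 16384);
 *		a = mmap(NULL, 16384, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *		b = mmap(NULL, 16384, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *		printf("colour delta: %#lx\n",
 *		       ((unsigned long)a ^ (unsigned long)b) & 16383);
 *		return 0;
 *	}
 *
 * arch_get_unmapped_area() above guarantees the printed delta is 0
 * when the caches alias.
 */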

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}
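
/*
 * Illustrative range, assuming a hypothetical platform with
 * PHYS_OFFSET at 0x80000000 and 256MB of lowmem: the checks above
 * accept exactly [0x80000000, 0x90000000) and reject any range that
 * starts below PHYS_OFFSET or runs past __pa(high_memory).
 */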

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}
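
/*
 * Arithmetic check (illustrative): with 4KB pages, 0x00100000 page
 * frames span exactly 4GB, so the test above rejects any mapping
 * whose last byte would land at or above physical address
 * 0x100000000.
 */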

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables
 * read()/write() on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
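
/*
 * Example outcomes (illustrative): a pfn inside system RAM is denied
 * (returns 0), a pfn inside a region a driver claimed with
 * IORESOURCE_EXCLUSIVE is denied, and any other pfn, e.g. unclaimed
 * MMIO, is allowed (returns 1).
 */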

#endif