/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

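/*
 * Alignment that shared mappings must honour to avoid cache aliasing:
 * a mapping at addr for file offset pgoff is acceptable only when
 * (addr - (pgoff << PAGE_SHIFT)) is a multiple of shm_align_mask + 1.
 * PAGE_SIZE - 1 is the sane default; CPUs whose data cache ways span
 * more than one page need this widened to cover the alias distance.
 */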
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map shared pages with the same colour.
 */
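/*
 * Return an address at or above addr whose cache colour matches that
 * of the file offset: round up to an alias boundary, then add the
 * colour bits of pgoff.
 */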
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}

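/*
 * The mirror image: return an address at or below addr with the cache
 * colour of the file offset. If adding the colour bits to the
 * aligned-down base would overshoot addr, drop back one full alias
 * span so that the colour is preserved.
 */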
static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
					      unsigned long pgoff)
{
	unsigned long base = addr & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	if (base + off <= addr)
		return base + off;

	/* base - off would land on the wrong colour; back off a span */
	return base + off - (shm_align_mask + 1);
}

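/*
 * Bottom-up first-fit search over the address space, mirroring the
 * generic arch_get_unmapped_area() but with cache-colour alignment
 * applied to every candidate address.
 */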
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

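	/*
	 * Colour-align anything that can show up at more than one
	 * virtual address: file-backed mappings and shared anonymous
	 * memory. Private anonymous memory never appears at two
	 * different virtual addresses, so it needs no colouring.
	 */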
	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

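	/*
	 * free_area_cache remembers where the last search left off and
	 * cached_hole_size is the largest gap known to exist below it.
	 * Only a request too big for every hole already skipped may
	 * resume from the cache; smaller ones must rescan from
	 * TASK_UNMAPPED_BASE.
	 */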
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		mm->cached_hole_size = 0;
		start_addr = addr = TASK_UNMAPPED_BASE;
	}

full_search:
	if (do_colour_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (unlikely(TASK_SIZE - len < addr)) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

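/*
 * Top-down variant: when the mmap base sits just below the stack,
 * mappings are handed out from high addresses downward, sliding below
 * each conflicting vma until a hole of the right size and colour is
 * found.
 */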
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;
	if (do_colour_align) {
		unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff);

		addr = base + len;
	}

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr - len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr - len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = mm->mmap_base - len;
	if (do_colour_align)
		addr = COLOUR_ALIGN_DOWN(addr, pgoff);

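	/*
	 * Walk downward from the highest candidate. The loop condition
	 * (len < vma->vm_start) guarantees that vma->vm_start - len
	 * cannot wrap below zero on the next iteration.
	 */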
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start - len;
		if (do_colour_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
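/*
 * Permit read()/write() access only to real system memory: nothing
 * below the start of RAM and nothing at or above the top of lowmem
 * (high_memory).
 */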
int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

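/*
 * No restriction on mmap() of /dev/mem: any page frame, including
 * MMIO, may be mapped.
 */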
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}