/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

#ifdef CONFIG_MMU
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/*
 * To avoid cache aliasing, we map shared pages with the same cache
 * colour, i.e. at the same offset modulo shm_align_mask + 1.
 */
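/*
 * Worked example with hypothetical numbers (assuming the CPU probe code
 * has raised shm_align_mask to 0x3fff for a 16 KiB aliasing span):
 * COLOUR_ALIGN(0x40001000, 3) rounds the hint up to the span boundary
 * 0x40004000, then adds the file offset's colour,
 * (3 << PAGE_SHIFT) & 0x3fff == 0x3000, returning 0x40007000.
 */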
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}

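/*
 * As above, but align downwards: place the colour offset within the span
 * containing the hint when that stays at or below the hint, otherwise
 * step back below the start of the span.
 */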
static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
					      unsigned long pgoff)
{
	unsigned long base = addr & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	if (base + off <= addr)
		return base + off;

	return base - off;
}

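/*
 * Bottom-up allocator: search upwards from free_area_cache (falling back
 * to TASK_UNMAPPED_BASE) for the first hole large enough to hold the
 * request, honouring any cache colour constraint.
 */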
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		mm->cached_hole_size = 0;
		start_addr = addr = TASK_UNMAPPED_BASE;
	}

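	/*
	 * First-fit walk over the VMA list.  If the scan runs off the end
	 * of the address space, it is restarted once from
	 * TASK_UNMAPPED_BASE in case an earlier hole was skipped.
	 */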
full_search:
	if (do_colour_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(mm->free_area_cache);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (unlikely(TASK_SIZE - len < addr)) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

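/*
 * Top-down allocator: start just below mm->mmap_base and walk the VMA
 * list downwards, taking the first hole that fits.  If the search
 * exhausts the address space, fall back to the bottom-up allocator.
 */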
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;
	if (do_colour_align) {
		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);

		addr = base + len;
	}

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = mm->mmap_base-len;
	if (do_colour_align)
		addr = COLOUR_ALIGN_DOWN(addr, pgoff);

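	/*
	 * Walk downwards one hole at a time; each iteration retries just
	 * below the VMA that blocked the previous candidate.
	 */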
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr+len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
		if (do_colour_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
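/* Reads and writes must fall entirely within [__MEMORY_START, __pa(high_memory)). */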
int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

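/*
 * Any physical address may be mmap()ed through /dev/mem; unlike the
 * read()/write() path above, no range checking is applied here.
 */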
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}