/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <asm/cputype.h>
#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
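/*
 * Worked example (assuming 4K pages, so SHMLBA == 4 * PAGE_SIZE == 0x4000
 * on ARM): COLOUR_ALIGN(0x40001000, 3) rounds the address up to 0x40004000
 * and adds the colour of the page offset, (3 << 12) & 0x3fff == 0x3000,
 * giving 0x40007000.  Both 0x40007000 and file page 3 have colour 3, so
 * every mapping of that page lands on the same cache colour.
 */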

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a specific page of an
 * object must always be mapped at a consistent offset modulo
 * SHMLBA bytes, so every alias of it lands on the same cache colour.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#ifdef CONFIG_CPU_V6
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by the aliasing (P) bits of
	 * the cache type register: bit 11 for the I cache and bit 23
	 * for the D cache.
	 */
	cache_type = read_cpuid_cachetype();
	if (cache_type != read_cpuid_id()) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
#else
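/*
 * Without CONFIG_CPU_V6 there are no aliasing caches to cope with, so the
 * constant definitions below let the compiler optimise the colouring code
 * away entirely.
 */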
#define do_align 0
#define aliasing 0
#endif

	/*
	 * We enforce the MAP_FIXED case: the caller's address cannot be
	 * moved, so all we can do is reject a shared mapping that is not
	 * already colour-aligned on an aliasing cache.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
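	/*
	 * No usable hint: fall back to the per-mm free-area cache.  If the
	 * request is larger than every hole skipped so far, continue from
	 * where the last search left off; otherwise one of those earlier
	 * holes may fit, so restart from TASK_UNMAPPED_BASE.
	 */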
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

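	/*
	 * Linear walk of the VMA list: stop at the first gap between
	 * mappings that is large enough (and correctly coloured) for the
	 * request.
	 */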
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
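		/*
		 * This gap was too small; keep track of the largest hole
		 * passed over so the next search knows whether a rescan
		 * from the bottom is worthwhile.
		 */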
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
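
/*
 * Example (hypothetical userspace usage): a shared file mapping such as
 *
 *	mmap(NULL, 8192, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0x3000);
 *
 * is placed by arch_get_unmapped_area() above, so on an aliasing VIPT
 * cache the address chosen for file offset 0x3000 always has the same
 * SHMLBA colour, and every mapping of that page uses a single D-cache
 * colour.
 */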

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
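	/*
	 * Only RAM covered by the kernel's direct (lowmem) mapping may be
	 * accessed this way: reject anything below PHYS_OFFSET or beyond
	 * high_memory.
	 */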
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
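	/*
	 * 0x00100000 page frames of 4K each is exactly the 4GiB boundary;
	 * any range reaching beyond it cannot be mapped into userspace.
	 */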
	return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}