/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
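
/*
 * Note: the PAGE_SIZE - 1 default assumes a cache with no aliasing
 * ("sane caches"). The mask is a variable, and exported, because on
 * parts whose D-cache ways span more than one page it is expected to
 * be widened during cache initialization so that COLOUR_ALIGN() below
 * covers a full alias set.
 */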

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map the shared page with the same colour.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}
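
/*
 * Worked example (hypothetical shm_align_mask = 0x3fff, i.e. a 16KB
 * alias span, with PAGE_SHIFT = 12): COLOUR_ALIGN(0x10001000, 3)
 * rounds the hint up to base = 0x10004000, then adds
 * off = (3 << 12) & 0x3fff = 0x3000, returning 0x10007000. The low
 * bits of the result match the colour of the file offset, so the same
 * data is never cached at two different alias positions.
 */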

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

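	/*
	 * File-backed and MAP_SHARED mappings can appear at more than
	 * one virtual address, so they must be colour-aligned; private
	 * anonymous memory has a single mapping and needs no alignment.
	 */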
	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

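	/*
	 * No usable hint: ask vm_unmapped_area() for a bottom-up search.
	 * When colouring is required, align_mask/align_offset restrict
	 * the result to addresses whose colour bits match those of
	 * pgoff << PAGE_SHIFT (PAGE_MASK strips the sub-page bits, which
	 * are meaningless for placement).
	 */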
	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

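	/*
	 * Search top-down from mmap_base, with a low limit of PAGE_SIZE
	 * so the NULL page is never handed out; colour constraints are
	 * applied exactly as in the bottom-up case above.
	 */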
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
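	/*
	 * A valid access must fall entirely within
	 * [__MEMORY_START, __pa(high_memory)), i.e. within directly
	 * mapped system RAM.
	 */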
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

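/*
 * No additional restriction is imposed on which physical pages may be
 * mmap()ed through /dev/mem.
 */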
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}