xref: /openbmc/linux/arch/arm/mm/ioremap.c (revision a09d2831)
/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000

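/*
 * This ARM-private flag sits in the top bit of vm_struct->flags, away from
 * the generic VM_* values, and is tested in __iounmap(): the core vmalloc
 * code knows nothing about (super)section mappings, so they must be torn
 * down by hand with unmap_area_sections() before the area is released.
 */
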
static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
			  unsigned long phys_addr, const struct mem_type *type)
{
	pgprot_t prot = __pgprot(type->prot_pte);
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;

	do {
		if (!pte_none(*pte))
			goto bad;

		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
		phys_addr += PAGE_SIZE;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;

 bad:
	printk(KERN_CRIT "remap_area_pte: page already exists\n");
	BUG();
}

static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
				 unsigned long end, unsigned long phys_addr,
				 const struct mem_type *type)
{
	unsigned long next;
	pmd_t *pmd;
	int ret = 0;

	pmd = pmd_alloc(&init_mm, pgd, addr);
	if (!pmd)
		return -ENOMEM;

	do {
		next = pmd_addr_end(addr, end);
		ret = remap_area_pte(pmd, addr, next, phys_addr, type);
		if (ret)
			return ret;
		phys_addr += next - addr;
	} while (pmd++, addr = next, addr != end);
	return ret;
}

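/*
 * Build a small-page (4K PTE) mapping for [start, start + size) using the
 * generic pgd/pmd/pte walk above.  The PTE protection bits come from
 * type->prot_pte; this is the path taken whenever a section or
 * supersection mapping is not possible (and, on SMP, always).
 */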
static int remap_area_pages(unsigned long start, unsigned long pfn,
			    size_t size, const struct mem_type *type)
{
	unsigned long addr = start;
	unsigned long next, end = start + size;
	unsigned long phys_addr = __pfn_to_phys(pfn);
	pgd_t *pgd;
	int err = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_area_pmd(pgd, addr, next, phys_addr, type);
		if (err)
			break;
		phys_addr += next - addr;
	} while (pgd++, addr = next, addr != end);

	return err;
}

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return remap_area_pages(virt, __phys_to_pfn(phys), PAGE_SIZE, mtype);
}
EXPORT_SYMBOL(ioremap_page);

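/*
 * Purely illustrative example (the addresses and the MT_DEVICE choice are
 * made up, not taken from real board code): establish a single-page device
 * mapping at a virtual address the caller has already reserved.
 *
 *	if (ioremap_page(0xfee00000, 0x10000000, get_mem_type(MT_DEVICE)))
 *		pr_err("fixed ioremap failed\n");
 */

/*
 * The master copy of the kernel's vmalloc/ioremap page tables lives in
 * init_mm.  Every other mm carries a snapshot of the first-level entries
 * covering that region, tagged with a sequence number (kvm_seq) recording
 * how recent the snapshot is.  Whenever unmap_area_sections() below tears
 * down a first-level entry it bumps init_mm.context.kvm_seq; a stale mm is
 * then brought back in sync here by recopying the pgd entries spanning
 * VMALLOC_START..VMALLOC_END, retrying if the sequence number moves again
 * while we copy.
 */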
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (e.g.) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area() allocates a guard 4K page, so we need to mask
 * the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

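/*
 * Map the region with 1MB section entries.  A Linux pgd slot on ARM covers
 * 2MB (a pair of hardware 1MB first-level descriptors), which is why each
 * pass below writes both pmd[0] and pmd[1] and then advances the address
 * by PGDIR_SIZE.  The caller has already verified that the virtual
 * address, physical address and size are all suitably aligned.
 */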
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}

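/*
 * Map the region with 16MB supersection entries (ARMv6 and later with the
 * extended page table format, and XScale3).  A supersection descriptor has
 * to be replicated across all 16 consecutive hardware first-level entries
 * it spans, which is what the inner loop does: eight pgd slots, two pmds
 * each.  Bits [23:20] of the descriptor carry physical address bits
 * [35:32], which is how mappings above the 4GB boundary are expressed.
 */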
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'mtype' selects the memory type (one of the MT_* values from
 * <asm/mach/map.h>) whose attributes are used for this mapping; it is
 * resolved with get_mem_type() below.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = remap_area_pages(addr, pfn, size, type);

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
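
/*
 * Unlike __arm_ioremap(), this takes a page frame number, so it can name
 * physical addresses that do not fit in a 32-bit unsigned long - with
 * supersections this is how devices above the 4GB boundary are reached.
 * Illustrative call only (the pfn is invented):
 *
 *	void __iomem *va = __arm_ioremap_pfn(0x110000, 0, SZ_16M, MT_DEVICE);
 */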

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn(pfn, offset, size, mtype);
}
EXPORT_SYMBOL(__arm_ioremap);
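
/*
 * Most drivers never call __arm_ioremap() directly; they use the
 * ioremap()/ioremap_nocache()/ioremap_cached() wrappers from <asm/io.h>,
 * which simply pass the appropriate MT_DEVICE* memory type here.  A
 * typical (illustrative) sequence, with a made-up physical address and
 * register offset:
 *
 *	void __iomem *base = ioremap(0x48000000, SZ_4K);
 *
 *	if (!base)
 *		return -ENOMEM;
 *	writel(1, base + 0x10);
 *	...
 *	iounmap(base);
 */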

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here because we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p); p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);