// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
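
/*
 * Example usage (a minimal sketch; the physical base, size and register
 * offset below are hypothetical):
 *
 *	void __iomem *regs = ioremap(0xd0000000, SZ_4K);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(readl(regs + 0x04) | BIT(0), regs + 0x04);
 *	iounmap(regs);
 *
 * Plain pointer dereferences of the returned cookie are not portable;
 * stick to the read*()/write*()/memcpy_*io() accessors.
 */
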
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"

LIST_HEAD(static_vmlist);

static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;

		if (vm->phys_addr > paddr ||
			paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;

		return svm;
	}

	return NULL;
}

struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;

		/* static_vmlist is sorted in ascending vaddr order */
		if (vm->addr > vaddr)
			break;

		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}

	return NULL;
}

void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm;
	void *vaddr;

	vm = &svm->vm;
	vm_area_add_early(vm);
	vaddr = vm->addr;

	/* Insert in address order so static_vmlist stays sorted. */
	list_for_each_entry(curr_svm, &static_vmlist, list) {
		vm = &curr_svm->vm;

		if (vm->addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

void __check_vmalloc_seq(struct mm_struct *mm)
{
	unsigned int seq;

	/*
	 * Copy the kernel's vmalloc-area PGD entries into this mm,
	 * retrying until the sequence number is stable across the
	 * copy (i.e. no concurrent unmap bumped it meanwhile).
	 */
	do {
		seq = init_mm.context.vmalloc_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.vmalloc_seq = seq;
	} while (seq != init_mm.context.vmalloc_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_offset(p4d, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.vmalloc_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		/* Each Linux PMD covers a pair of 1MB hardware sections. */
		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
		__check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_offset(p4d, addr);
	pmd = pmd_offset(pud, addr);
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	p4d = p4d_offset(pgd, addr);
	pud = pud_offset(p4d, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		/* A 16MB supersection is replicated across 16 entries. */
		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;
	phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
		struct static_vm *svm;

		svm = find_static_vm_paddr(paddr, size, mtype);
		if (svm) {
			addr = (unsigned long)svm->vm.addr;
			addr += paddr - svm->vm.phys_addr;
			return (void __iomem *) (offset + addr);
		}
	}

	/*
	 * Don't allow RAM to be mapped with mismatched attributes - this
	 * causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, paddr,
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	phys_addr_t last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
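
/*
 * Worked example (hypothetical register address): with 4K pages, a
 * register at physical 0x10010004 maps as pfn = 0x10010 and
 * offset = 0x004; the returned cookie already includes the sub-page
 * offset:
 *
 *	void __iomem *reg = __arm_ioremap_pfn(0x10010, 0x004, 4, MT_DEVICE);
 */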

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
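
/*
 * A typical ioremap_wc() use is a framebuffer, where write-combining
 * batches CPU stores without strongly-ordered device semantics. A
 * minimal sketch (the base and size are hypothetical):
 *
 *	void __iomem *fb = ioremap_wc(0x4c800000, SZ_8M);
 *
 *	if (fb)
 *		memset_io(fb, 0, SZ_8M);
 */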

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory, e.g. for reprogramming clock sources that
 * would affect normal memory. Please see CONFIG_GENERIC_ALLOCATOR
 * for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY_RWX;
	else
		mtype = MT_MEMORY_RWX_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
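
/*
 * A minimal sketch of the intended use (the SRAM base and the routine
 * being copied are hypothetical; fncpy() comes from <asm/fncpy.h>):
 * copy a relocatable routine into the executable mapping, then call
 * it through the returned pointer.
 *
 *	void (*sram_fn)(void);
 *	void __iomem *sram = __arm_ioremap_exec(0x40300000, SZ_4K, true);
 *
 *	if (sram) {
 *		sram_fn = fncpy(sram, &my_reprogram_clocks, SZ_4K);
 *		sram_fn();
 *	}
 */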

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (__force void *)arch_ioremap_caller(phys_addr, size,
						   MT_MEMORY_RW,
						   __builtin_return_address(0));
}

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct static_vm *svm;

	/* If this is a static mapping, we must leave it alone */
	svm = find_static_vm_vaddr(addr);
	if (svm)
		return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	{
		struct vm_struct *vm;

		vm = find_vm_area(addr);

		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
			unmap_area_sections((unsigned long)vm->addr, vm->size);
	}
#endif

	vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void iounmap(volatile void __iomem *cookie)
{
	arch_iounmap(cookie);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
	pci_ioremap_mem_type = mem_type;
}

int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
	BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);

	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
				  PCI_IO_VIRT_BASE + offset + SZ_64K,
				  phys_addr,
				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);
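
/*
 * A host bridge driver typically maps its I/O window at offset 0 of
 * the Linux I/O space; a minimal sketch (the resource is
 * hypothetical):
 *
 *	int ret = pci_ioremap_io(0, pci_io_res->start);
 *
 *	if (ret)
 *		return ret;
 */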

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}