xref: /openbmc/linux/arch/arm/mm/ioremap.c (revision ceaccbd2)
/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
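/*
 * Illustrative sketch (not part of the original file): a driver maps a
 * device register window and then touches it only through the MMIO
 * accessors, as the comment above requires.  The base address and
 * register offset below are hypothetical.
 */
#if 0	/* example only, never built */
#define EXAMPLE_PHYS_BASE	0x48000000	/* hypothetical device base */
#define EXAMPLE_STATUS_REG	0x04		/* hypothetical register offset */

static int example_read_status(u32 *status)
{
	void __iomem *regs = __ioremap(EXAMPLE_PHYS_BASE, SZ_4K, 0);

	if (!regs)
		return -ENOMEM;
	*status = readl(regs + EXAMPLE_STATUS_REG);	/* MMIO accessor only */
	__iounmap(regs);
	return 0;
}
#endif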
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000

static inline void
remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	       unsigned long phys_addr, pgprot_t pgprot)
{
	unsigned long end;

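	/*
	 * Reduce 'address' to its offset within the current pmd and clamp
	 * the walk at the pmd boundary; the caller loops over pmds and
	 * calls us once per pmd-sized chunk.
	 */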
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	do {
		if (!pte_none(*pte))
			goto bad;

		set_pte(pte, pfn_pte(phys_addr >> PAGE_SHIFT, pgprot));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	return;

 bad:
	printk(KERN_ERR "remap_area_pte: page already exists\n");
	BUG();
}

static inline int
remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
	       unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	pgprot_t pgprot;

	address &= ~PGDIR_MASK;
	end = address + size;

	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

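	/*
	 * Bias phys_addr by -address so that 'address + phys_addr' in the
	 * loop below always yields the physical address corresponding to
	 * the current virtual address.
	 */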
	phys_addr -= address;
	BUG_ON(address >= end);

	pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, pgprot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

static int
remap_area_pages(unsigned long start, unsigned long pfn,
		 unsigned long size, unsigned long flags)
{
	unsigned long address = start;
	unsigned long end = start + size;
	unsigned long phys_addr = __pfn_to_phys(pfn);
	int err = 0;
	pgd_t * dir;

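	/* Same bias trick as in remap_area_pmd(): see the comment there. */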
	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	BUG_ON(address >= end);
	do {
		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
		if (!pmd) {
			err = -ENOMEM;
			break;
		}
		if (remap_area_pmd(pmd, address, end - address,
					 phys_addr + address, flags)) {
			err = -ENOMEM;
			break;
		}

		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));

	return err;
}


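/*
 * Note: "kvm" here is short for "kernel virtual mappings", not the KVM
 * hypervisor.  Whenever the kernel's vmalloc/ioremap pgd entries change,
 * init_mm.context.kvm_seq is bumped; this helper copies the current
 * kernel entries into another mm and retries until it observes a stable
 * sequence number.
 */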
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area() allocates a guard 4K page, so we need to mask
 * the size back to 1MB aligned or we will overflow in the loop below.
 */
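/*
 * For example, a 2MB section mapping is recorded by get_vm_area() with
 * size = 2MB + 4K (the guard page); rounding the size down to a whole
 * number of megabytes gives exactly 2MB, so the PGDIR_SIZE strides
 * below stop before touching entries past the mapping.
 */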
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(pmd_page_kernel(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    unsigned long size, unsigned long flags)
{
	unsigned long prot, addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO) |
	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

	/*
	 * ARMv6 and above need XN set to prevent speculative prefetches
	 * hitting IO.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6)
		prot |= PMD_SECT_XN;

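	/*
	 * Each Linux pmd entry here spans 2MB and is implemented as a
	 * pair of 1MB ARM hardware sections, hence pmd[0] and pmd[1]
	 * are both written on every iteration.
	 */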
	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | prot);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | prot);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 unsigned long size, unsigned long flags)
{
	unsigned long prot, addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	prot = PMD_TYPE_SECT | PMD_SECT_SUPER | PMD_SECT_AP_WRITE |
			PMD_DOMAIN(DOMAIN_IO) |
			(flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

	/*
	 * ARMv6 and above need XN set to prevent speculative prefetches
	 * hitting IO.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6)
		prot |= PMD_SECT_XN;

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

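		/*
		 * A supersection maps 16MB and carries physical address
		 * bits [35:32] in descriptor bits [23:20].  The descriptor
		 * must be replicated into all 16 consecutive 1MB section
		 * slots, which the 8 pmd[0]/pmd[1] iterations below do.
		 */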
		super_pmd_val = __pfn_to_phys(pfn) | prot;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif


/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
 * mapping.  See include/asm-arm/proc-armv/pgtable.h for more information.
 */
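/*
 * For example, passing L_PTE_CACHEABLE | L_PTE_BUFFERABLE in 'flags'
 * yields a cached mapping, while flags == 0 leaves the mapping uncached,
 * the usual choice for device registers.
 */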
void __iomem *
__ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
	      unsigned long flags)
{
	int err;
	unsigned long addr;
	struct vm_struct * area;

	/*
	 * High mappings (pfn >= 0x100000, i.e. physical addresses at or
	 * above 4GB with 4K pages) can only be reached through
	 * supersections, so they must be supersection aligned.
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if ((((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, flags);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, flags);
	} else
#endif
		err = remap_area_pages(addr, pfn, size, flags);

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
EXPORT_SYMBOL(__ioremap_pfn);

void __iomem *
__ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Page align the mapping size
	 */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	return __ioremap_pfn(pfn, offset, size, flags);
}
EXPORT_SYMBOL(__ioremap);

void __iounmap(void __iomem *addr)
{
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;
#endif
	unsigned int section_mapping = 0;

	addr = (void __iomem *)(PAGE_MASK & (unsigned long)addr);

#ifndef CONFIG_SMP
	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast.  We need the lock here because we have to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p); p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				*p = tmp->next;
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
				kfree(tmp);
				section_mapping = 1;
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	if (!section_mapping)
		vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);