/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

static inline int phys_addr_valid(resource_size_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	return !(addr >> boot_cpu_data.x86_phys_bits);
#else
	return 1;
#endif
}

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
		x += phys_base;
	} else {
		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
		x -= PAGE_OFFSET;
		VIRTUAL_BUG_ON(!phys_addr_valid(x));
	}
	return x;
}
EXPORT_SYMBOL(__phys_addr);

bool __virt_addr_valid(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		if (x >= KERNEL_IMAGE_SIZE)
			return false;
		x += phys_base;
	} else {
		if (x < PAGE_OFFSET)
			return false;
		x -= PAGE_OFFSET;
		if (!phys_addr_valid(x))
			return false;
	}

	return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);
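
/*
 * Usage note (editorial): __phys_addr() is the worker behind the
 * __pa()/virt_to_phys() macros, and __virt_addr_valid() backs the
 * virt_addr_valid() check. A minimal sketch of the round trip, assuming
 * 'buf' lives in the direct mapping (e.g. it came from kmalloc()):
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	phys_addr_t phys = virt_to_phys(buf);
 *
 *	BUG_ON(!virt_addr_valid(buf));
 *	BUG_ON(phys_to_virt(phys) != buf);
 */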

#else

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
	/* VMALLOC_* aren't constants */
	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
	VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#endif

bool __virt_addr_valid(unsigned long x)
{
	if (x < PAGE_OFFSET)
		return false;
	if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
		return false;
	if (x >= FIXADDR_START)
		return false;
	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#endif

int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4KB of memory: this is a BIOS-owned
	 * area, not kernel RAM, but it is generally not listed as such in
	 * the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: some BIOSes report the PC BIOS area
	 * (640KB->1MB) as RAM even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
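
/*
 * Usage note (editorial): page_is_ram() takes a page frame number, not
 * a byte address, so callers shift physical addresses first, as the
 * RAM check in __ioremap_caller() below does:
 *
 *	if (page_is_ram(phys_addr >> PAGE_SHIFT))
 *		return NULL;
 */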

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped.
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than one BAR in the iomem
	 * resource tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned: e.g. a request for 0x10 bytes
	 * at 0x1001 maps the whole page at 0x1000 and returns the address
	 * at offset 0x1 into it.
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fall back to certain memory types with certain
		 * requested type:
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			pr_debug(
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses; in particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Until we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
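
/*
 * Example (editorial sketch): mapping a device BAR and poking one
 * register; the readl() read-back flushes the posted write.
 * MYDEV_BAR_PHYS, MYDEV_BAR_LEN and MYDEV_CTRL are made-up names, not
 * part of this file; a real PCI driver would take them from
 * pci_resource_start()/pci_resource_len().
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(MYDEV_BAR_PHYS, MYDEV_BAR_LEN);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + MYDEV_CTRL);
 *	(void)readl(regs + MYDEV_CTRL);
 *	iounmap(regs);
 */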

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
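
/*
 * Example (editorial sketch): write-combining suits large, write-mostly
 * regions such as framebuffers; thanks to the fallback above, callers
 * still get a usable (uncached) mapping when PAT is disabled. FB_PHYS
 * and FB_LEN are made-up names:
 *
 *	void __iomem *fb = ioremap_wc(FB_PHYS, FB_LEN);
 *
 *	if (fb) {
 *		memset_io(fb, 0, FB_LEN);
 *		iounmap(fb);
 *	}
 */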

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
					unsigned long size)
{
	unsigned long flags;
	void __iomem *ret;
	int err;

	/*
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size,
				_PAGE_CACHE_WB, &flags);
	if (err < 0)
		return NULL;

	ret = __ioremap_caller(phys_addr, size, flags,
			       __builtin_return_address(0));

	free_memtype(phys_addr, phys_addr + size);
	return ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
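
/*
 * Example (editorial sketch): the /dev/mem read path pairs the two
 * helpers above around a copy-out; 'buf', 'phys' and 'count' are
 * illustrative, and a real caller checks the copy_to_user() return
 * value:
 *
 *	void *ptr = xlate_dev_mem_ptr(phys);
 *
 *	if (ptr) {
 *		copy_to_user(buf, ptr, count);
 *		unxlate_dev_mem_ptr(phys, ptr);
 *	}
 */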

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   phys_addr_t phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
		count);
	printk(KERN_WARNING
		"please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx): no free slot\n",
			 (u64)phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
		       (u64)phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
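
/*
 * Example (editorial sketch): early boot code typically maps a firmware
 * table before the normal ioremap() machinery is available, reads it,
 * and unmaps it again with the same address and size it was handed.
 * 'table_phys' and 'len' are illustrative:
 *
 *	void __iomem *p = early_memremap(table_phys, len);
 *
 *	if (p) {
 *		... parse the table through 'p' ...
 *		early_iounmap(p, len);
 *	}
 */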

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx): slot not found\n",
			 addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]: inconsistent size, expected %08lx\n",
			 addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}