xref: /openbmc/linux/arch/x86/mm/ioremap.c (revision ba748d22)
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

enum ioremap_mode {
	IOR_MODE_UNCACHED,
	IOR_MODE_CACHED,
};

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
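
/*
 * Illustrative example (editor's sketch, assuming phys_base == 0): a
 * kernel-text address such as __START_KERNEL_map + 0x100000 translates to
 * physical 0x100000 via the first branch, while the direct-mapping address
 * PAGE_OFFSET + 0x100000 reaches the same physical 0x100000 via the second.
 */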

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}
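
/*
 * Example: a CPU reporting 36 physical address bits accepts addresses below
 * 1UL << 36 (64 GB); anything at or above that limit is rejected.
 */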

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#endif

int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	/*
	 * A special case is the first 4KB of memory: this is a BIOS-owned
	 * area, not kernel RAM, but generally not listed as such in the
	 * E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: some BIOSes report the PC BIOS area
	 * (640K->1MB) as RAM even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
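
		/*
		 * Note the rounding: the region start is rounded up to a
		 * whole page and the end is truncated, so pages only partly
		 * covered by an E820_RAM entry do not count as RAM.  For
		 * example (illustrative), an entry at 0x9f400 with size
		 * 0xc00 yields addr == end == 0xa0 and matches no page.
		 */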

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
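/*
 * x86 requires all virtual mappings of a physical page to use compatible
 * cache attributes; set_memory_uc()/set_memory_wb() go through the cpa
 * machinery, which keeps any aliasing mappings (notably the direct mapping)
 * consistent with the ioremap mapping created below.
 */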
static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		err = set_memory_uc(vaddr, nrpages);
		break;
	case IOR_MODE_CACHED:
		err = set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long pfn, offset, last_addr, vaddr;
	struct vm_struct *area;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < last_addr; pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		/*
		 * FIXME: we will use UC MINUS for now, as video fb drivers
		 * depend on it. Upcoming ioremap_wc() will fix this behavior.
		 */
		prot = PAGE_KERNEL_UC_MINUS;
		break;
	case IOR_MODE_CACHED:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
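
	/*
	 * Worked example (illustrative): a request for phys_addr 0xfed00004
	 * and size 0x10 gives offset 0x4, a page-aligned phys_addr of
	 * 0xfed00000 and a rounded-up size of 0x1000; the caller gets back
	 * the mapped base plus the 0x4 offset.
	 */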

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, mode) < 0) {
		vunmap(area->addr);
		return NULL;
	}

	return (void __iomem *) (vaddr + offset);
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
}
EXPORT_SYMBOL(ioremap_nocache);
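
/*
 * Typical use (an illustrative sketch, not part of the original file; "pdev"
 * and the REG_* offsets are made-up names):
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + REG_ENABLE);
 *	status = readl(regs + REG_STATUS);
 *	...
 *	iounmap(regs);
 */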

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
		__section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}
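
/*
 * Background note (illustrative): FIX_BTMAP_BEGIN..FIX_BTMAP_END provide the
 * boot-time fixmap slots (NR_FIX_BTMAPS slots per nesting level, with
 * FIX_BTMAPS_NESTING levels).  All of them must live in a single pmd so that
 * the one bm_pte page installed above can back every slot; the warning fires
 * when the fixmap layout pushes the range across a pmd boundary.
 */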

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				   unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(NULL, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}
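
/*
 * Illustrative early-boot usage (not part of the original file; "tbl_phys"
 * and "tbl" are made-up names): map a firmware table before the normal
 * ioremap machinery is up, copy it out, and drop the temporary mapping.
 *
 *	void *p = early_ioremap(tbl_phys, sizeof(tbl));
 *	if (p) {
 *		memcpy(&tbl, p, sizeof(tbl));
 *		early_iounmap(p, sizeof(tbl));
 *	}
 */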

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */