xref: /openbmc/linux/mm/early_ioremap.c (revision 9e5c33d7aeeef62e5fa7e74f94432685bd03026b)
/*
 * Provide common bits of early_ioremap() support for architectures needing
 * temporary mappings during boot before ioremap() is available.
 *
 * This is mostly a direct copy of the x86 early_ioremap implementation.
 *
 * (C) Copyright 1995 1996, 2014 Linus Torvalds
 *
 */
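
/*
 * Usage sketch (illustrative): a hypothetical early boot caller that needs
 * to read a device register before ioremap() is usable.  The function name,
 * device address argument and register offset are assumptions made for this
 * example, not part of any real driver.
 *
 *	static u32 __init example_early_read_reg(phys_addr_t dev_phys)
 *	{
 *		void __iomem *base;
 *		u32 val;
 *
 *		base = early_ioremap(dev_phys, 0x1000);
 *		if (!base)
 *			return 0;
 *		val = readl(base + 0x10);
 *		early_iounmap(base, 0x1000);
 *		return val;
 *	}
 *
 * Every early_ioremap() must be paired with an early_iounmap() of the same
 * size; check_early_ioremap_leak() below warns at late_initcall time if a
 * slot is still mapped.
 */
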
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/fixmap.h>

#ifdef CONFIG_MMU
static int early_ioremap_debug __initdata;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static int after_paging_init __initdata;

void __init __weak early_ioremap_shutdown(void)
{
}

void __init early_ioremap_reset(void)
{
	early_ioremap_shutdown();
	after_paging_init = 1;
}

/*
 * Generally, ioremap() is available after paging_init() has been called.
 * Architectures wanting to allow early_ioremap after paging_init() can
 * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
 */
#ifndef __late_set_fixmap
static inline void __init __late_set_fixmap(enum fixed_addresses idx,
					    phys_addr_t phys, pgprot_t prot)
{
	BUG();
}
#endif

#ifndef __late_clear_fixmap
static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
{
	BUG();
}
#endif
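
/*
 * For illustration, an architecture that wants early_ioremap() to keep
 * working after paging_init() might map these hooks onto its regular fixmap
 * primitives, roughly along the lines of the x86 definitions (a hedged
 * sketch only; the authoritative versions live in the architecture's
 * <asm/fixmap.h>):
 *
 *	#define __late_set_fixmap(idx, phys, prot)	__set_fixmap(idx, phys, prot)
 *	#define __late_clear_fixmap(idx)		__set_fixmap(idx, 0, __pgprot(0))
 */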

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_setup(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (WARN_ON(prev_map[i]))
			break;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
}
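
/*
 * Slot layout, worked through for concreteness: fixmap indices count
 * downwards as virtual addresses go up, so slot i owns the NR_FIX_BTMAPS
 * indices starting at FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i.  With a typical
 * NR_FIX_BTMAPS of 64 pages and FIX_BTMAPS_SLOTS of 8 (values assumed here
 * for the example), slot 0 covers indices FIX_BTMAP_BEGIN down to
 * FIX_BTMAP_BEGIN - 63, slot 1 covers FIX_BTMAP_BEGIN - 64 down to
 * FIX_BTMAP_BEGIN - 127, and so on.  slot_virt[] caches the virtual address
 * of each slot's first (lowest) page so __early_ioremap() can hand back
 * slot_virt[slot] + offset without recomputing it.
 */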

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (WARN(count, KERN_WARNING
		 "Debug warning: early ioremap leak of %d areas detected.\n"
		 "please boot with early_ioremap_debug and report the dmesg.\n",
		 count))
		return 1;
	return 0;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%08llx, %08lx) not found slot\n",
		 __func__, (u64)phys_addr, size))
		return NULL;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (WARN_ON(!size || last_addr < phys_addr))
		return NULL;

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
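
	/*
	 * Worked example with made-up numbers: for phys_addr 0x12345678 and
	 * size 0x100 with 4K pages, offset becomes 0x678, phys_addr is
	 * rounded down to 0x12345000 and size rounds up to 0x1000, i.e. one
	 * fixmap page; the pointer returned below is slot_virt[slot] + 0x678.
	 */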

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (WARN_ON(nrpages > NR_FIX_BTMAPS))
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_set_fixmap(idx, phys_addr, prot);
		else
			__early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	WARN(early_ioremap_debug, "%s(%08llx, %08lx) [%d] => %08lx + %08lx\n",
	     __func__, (u64)phys_addr, size, slot, offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "early_iounmap(%p, %08lx) not found slot\n",
		 addr, size))
		return;

	if (WARN(prev_size[slot] != size,
		 "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
		 addr, size, slot, prev_size[slot]))
		return;

	WARN(early_ioremap_debug, "early_iounmap(%p, %08lx) [%d]\n",
	     addr, size, slot);

	virt_addr = (unsigned long)addr;
	if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
		return;

	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_clear_fixmap(idx);
		else
			__early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void *)__early_ioremap(phys_addr, size,
					       FIXMAP_PAGE_NORMAL);
}
#else /* CONFIG_MMU */

void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void __iomem *)phys_addr;
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}

#endif /* CONFIG_MMU */

void __init early_memunmap(void *addr, unsigned long size)
{
	early_iounmap((__force void __iomem *)addr, size);
}
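
/*
 * Usage sketch (illustrative): early_memremap() is the flavour for ordinary
 * memory such as firmware or boot-loader provided tables, returning a plain
 * pointer rather than an __iomem one.  The function name and 32-bit
 * signature read below are assumptions made for this example.
 *
 *	static u32 __init example_read_table_sig(phys_addr_t table_phys)
 *	{
 *		void *table;
 *		u32 sig = 0;
 *
 *		table = early_memremap(table_phys, sizeof(sig));
 *		if (table) {
 *			memcpy(&sig, table, sizeof(sig));
 *			early_memunmap(table, sizeof(sig));
 *		}
 *		return sig;
 *	}
 *
 * As with early_ioremap(), the unmap size must match the mapped size and a
 * single mapping may span at most NR_FIX_BTMAPS pages.
 */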