/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_EARLY_IOREMAP_H_
#define _ASM_EARLY_IOREMAP_H_

#include <linux/types.h>

/*
 * early_ioremap() and early_iounmap() are for temporary early boot-time
 * mappings, before the real ioremap() is functional.
 */
extern void __iomem *early_ioremap(resource_size_t phys_addr,
				   unsigned long size);
extern void *early_memremap(resource_size_t phys_addr,
			    unsigned long size);
extern void *early_memremap_ro(resource_size_t phys_addr,
			       unsigned long size);
extern void *early_memremap_prot(resource_size_t phys_addr,
				 unsigned long size, unsigned long prot_val);
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void early_memunmap(void *addr, unsigned long size);

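/*
 * Illustrative usage sketch (not part of this header's API contract),
 * assuming a hypothetical device register block at MY_DEV_PHYS_BASE that
 * must be probed before ioremap() is available:
 *
 *	void __iomem *regs = early_ioremap(MY_DEV_PHYS_BASE, SZ_4K);
 *
 *	if (regs) {
 *		id = readl(regs + MY_DEV_ID_OFFSET);
 *		early_iounmap(regs, SZ_4K);
 *	}
 *
 * early_memremap()/early_memunmap() follow the same pattern but return a
 * plain pointer, intended for ordinary memory such as firmware tables.
 */
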
/*
 * Weak function called by early_ioremap_reset(). It does nothing, but
 * architectures may provide their own version to do any needed cleanups.
 */
extern void early_ioremap_shutdown(void);

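/*
 * Minimal sketch of an architecture override (the helper name is
 * hypothetical); a strong definition in arch code replaces the empty
 * __weak default of the generic early_ioremap implementation:
 *
 *	void __init early_ioremap_shutdown(void)
 *	{
 *		my_arch_teardown_early_fixmap();
 *	}
 */
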
#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU)
/* Arch-specific initialization */
extern void early_ioremap_init(void);

/* Generic initialization called by architecture code */
extern void early_ioremap_setup(void);

/*
 * Called as the last step in paging_init() so the library can act
 * accordingly for subsequent map/unmap requests.
 */
extern void early_ioremap_reset(void);

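/*
 * Rough ordering sketch (details vary per architecture): arch early setup
 * typically calls early_ioremap_init() (which in turn calls the generic
 * early_ioremap_setup()) before any early_ioremap() users run, and
 * early_ioremap_reset() as the last step of paging_init():
 *
 *	setup_arch()
 *	  -> early_ioremap_init()        arch hook, calls early_ioremap_setup()
 *	  -> ... early_ioremap()/early_iounmap() users ...
 *	  -> paging_init()
 *	       -> early_ioremap_reset()  the real ioremap() takes over
 */
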
/*
 * Early copy from unmapped memory to kernel mapped memory.
 */
extern void copy_from_early_mem(void *dest, phys_addr_t src,
				unsigned long size);

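/*
 * Hedged usage sketch: copy a boot-provided blob that is known only by
 * physical address (BLOB_PHYS and BLOB_SIZE are hypothetical) into an
 * already-mapped kernel buffer, before regular memremap() is usable:
 *
 *	static u8 boot_blob[BLOB_SIZE] __initdata;
 *
 *	copy_from_early_mem(boot_blob, BLOB_PHYS, BLOB_SIZE);
 */
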
#else
static inline void early_ioremap_init(void) { }
static inline void early_ioremap_setup(void) { }
static inline void early_ioremap_reset(void) { }
#endif

#endif /* _ASM_EARLY_IOREMAP_H_ */