// SPDX-License-Identifier: GPL-2.0-only
/*
 * (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P.
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/meminit.h>

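/*
 * Region 6 (__IA64_UNCACHED_OFFSET) is an uncached identity mapping of
 * all physical memory, so an uncached view of a physical address can be
 * formed by arithmetic alone; no page tables are involved.
 */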
static inline void __iomem *
__ioremap_uc(unsigned long phys_addr)
{
	return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
}

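/*
 * Called during early boot, before ioremap() proper is usable, so only
 * identity-mapped addresses are handed out: cached (region 7) if the
 * EFI memory map says the range supports write-back, uncached
 * (region 6) otherwise.  It therefore cannot fail.
 */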
void __iomem *
early_ioremap (unsigned long phys_addr, unsigned long size)
{
	u64 attr;

	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);
	return __ioremap_uc(phys_addr);
}

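/*
 * ioremap() picks the cheapest safe mapping, in order: the cached
 * region 7 identity mapping when the kernel already maps the range WB,
 * the uncached region 6 identity mapping when it is known UC, a real
 * page-table mapping when the range is WB-capable only at page
 * granularity, and uncached as the last resort.
 */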
void __iomem *
ioremap (unsigned long phys_addr, unsigned long size)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset;
	pgprot_t prot;
	u64 attr;
	unsigned long gran_base, gran_size;
	unsigned long page_base;

	/*
	 * For things in kern_memmap, we must use the same attribute
	 * as the rest of the kernel.  For more details, see
	 * Documentation/ia64/aliasing.rst.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);
	else if (attr & EFI_MEMORY_UC)
		return __ioremap_uc(phys_addr);

	/*
	 * Some chipsets don't support UC access to memory.  If
	 * WB is supported for the whole granule, we prefer that.
	 */
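	/*
	 * The identity mapping covers memory in granule-sized chunks
	 * (see Documentation/ia64/aliasing.rst), so WB is only usable
	 * if the entire enclosing granule range supports it; round the
	 * request out to granule boundaries before asking EFI.
	 */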
	gran_base = GRANULEROUNDDOWN(phys_addr);
	gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base;
	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * WB is not supported for the whole granule, so we can't use
	 * the region 7 identity mapping.  If we can safely cover the
	 * area with kernel page table mappings, we can use those
	 * instead.
	 */
	page_base = phys_addr & PAGE_MASK;
	size = PAGE_ALIGN(phys_addr + size) - page_base;
	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
		prot = PAGE_KERNEL;

		/*
		 * Mappings have to be page-aligned
		 */
		offset = phys_addr & ~PAGE_MASK;
		phys_addr &= PAGE_MASK;

		/*
		 * Reserve a chunk of vmalloc space and map the pages
		 * into it through the kernel page tables.
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		area->phys_addr = phys_addr;
		addr = (void __iomem *) area->addr;
		if (ioremap_page_range((unsigned long) addr,
				(unsigned long) addr + size, phys_addr, prot)) {
			vunmap((void __force *) addr);
			return NULL;
		}

		return (void __iomem *) (offset + (char __iomem *)addr);
	}

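	/*
	 * Not WB-mappable even at page granularity; fall back to the
	 * uncached identity mapping as a last resort, even though some
	 * chipsets (per the comment above) may not support UC access
	 * to memory.
	 */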
	return __ioremap_uc(phys_addr);
}
EXPORT_SYMBOL(ioremap);

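/*
 * Refuse to create an uncached mapping of memory the kernel already
 * maps cacheable: mixing WB and UC accesses to the same physical page
 * is an attribute aliasing violation (see
 * Documentation/ia64/aliasing.rst).
 */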
void __iomem *
ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
		return NULL;

	return __ioremap_uc(phys_addr);
}
EXPORT_SYMBOL(ioremap_nocache);

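/*
 * early_ioremap() only ever returns identity-mapped addresses, so
 * there is nothing to tear down here.
 */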
void
early_iounmap (volatile void __iomem *addr, unsigned long size)
{
}

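/*
 * Only mappings built by ioremap()'s page-table path live in region 5
 * (RGN_GATE, which holds the vmalloc area) and need vunmap(); the
 * region 6/7 identity addresses have no per-mapping state to release.
 */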
void
iounmap (volatile void __iomem *addr)
{
	if (REGION_NUMBER(addr) == RGN_GATE)
		vunmap((void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);