xref: /openbmc/linux/arch/ia64/mm/ioremap.c (revision 1edd0337)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P.
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/meminit.h>

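/*
 * Build an uncacheable identity mapping: on ia64, OR-ing a physical
 * address into region 6 (__IA64_UNCACHED_OFFSET) yields a virtual
 * address that bypasses the cache.  No page tables are involved, so
 * this cannot fail and needs no teardown.
 */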
static inline void __iomem *
__ioremap_uc(unsigned long phys_addr)
{
	return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
}

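/*
 * Early-boot variant of ioremap().  Both possible results are
 * identity mappings (cached region 7 via phys_to_virt(), or the
 * uncached region 6 alias), so there is no state for early_iounmap()
 * to tear down.
 */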
void __iomem *
early_ioremap (unsigned long phys_addr, unsigned long size)
{
	u64 attr;

	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);
	return __ioremap_uc(phys_addr);
}

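/*
 * ioremap() picks a mapping in decreasing order of preference:
 *
 *   1. The attribute the kernel already uses for this range
 *      (kern_memmap): cached region 7 for WB, region 6 for UC.
 *   2. A cached region 7 identity mapping, if firmware permits WB
 *      for the whole granule.
 *   3. A cacheable page-table mapping in vmalloc space, if firmware
 *      permits WB for the pages covering the range.
 *   4. The uncached region 6 mapping as a last resort.
 *
 * See Documentation/ia64/aliasing.rst for why a granule must never
 * be accessed with mixed memory attributes.
 *
 * Illustrative caller pattern (a sketch only; "dev_base", "dev_size"
 * and "CTRL_REG" are hypothetical):
 *
 *	void __iomem *regs = ioremap(dev_base, dev_size);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_REG);
 *	iounmap(regs);
 */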
void __iomem *
ioremap (unsigned long phys_addr, unsigned long size)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset;
	pgprot_t prot;
	u64 attr;
	unsigned long gran_base, gran_size;
	unsigned long page_base;

	/*
	 * For things in kern_memmap, we must use the same attribute
	 * as the rest of the kernel.  For more details, see
	 * Documentation/ia64/aliasing.rst.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);
	else if (attr & EFI_MEMORY_UC)
		return __ioremap_uc(phys_addr);

	/*
	 * Some chipsets don't support UC access to memory.  If
	 * WB is supported for the whole granule, we prefer that.
	 */
	gran_base = GRANULEROUNDDOWN(phys_addr);
	gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base;
	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * WB is not supported for the whole granule, so we can't use
	 * the region 7 identity mapping.  If we can safely cover the
	 * area with kernel page table mappings, we can use those
	 * instead.
	 */
	page_base = phys_addr & PAGE_MASK;
	size = PAGE_ALIGN(phys_addr + size) - page_base;
	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
		prot = PAGE_KERNEL;

		/*
		 * Mappings have to be page-aligned; remember the
		 * sub-page offset so it can be added back below.
		 */
		offset = phys_addr & ~PAGE_MASK;
		phys_addr &= PAGE_MASK;

		/*
		 * Reserve a vmalloc area and install the page tables.
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		area->phys_addr = phys_addr;
		addr = (void __iomem *) area->addr;
		if (ioremap_page_range((unsigned long) addr,
				(unsigned long) addr + size, phys_addr, prot)) {
			vunmap((void __force *) addr);
			return NULL;
		}

		return (void __iomem *) (offset + (char __iomem *)addr);
	}

	return __ioremap_uc(phys_addr);
}
EXPORT_SYMBOL(ioremap);

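/*
 * Like ioremap(), but fail (return NULL) rather than hand out an
 * uncacheable alias for memory the kernel already maps cacheable;
 * mixing WB and UC accesses to the same granule is unsafe (see
 * Documentation/ia64/aliasing.rst).
 */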
void __iomem *
ioremap_uc(unsigned long phys_addr, unsigned long size)
{
	if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
		return NULL;

	return __ioremap_uc(phys_addr);
}
EXPORT_SYMBOL(ioremap_uc);

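/*
 * early_ioremap() only returns identity mappings, so there is
 * nothing to undo here.
 */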
void
early_iounmap (volatile void __iomem *addr, unsigned long size)
{
}

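/*
 * Only ioremap()'s page-table path hands out vmalloc-space addresses
 * (region 5, RGN_GATE); those need vunmap().  Identity mappings in
 * regions 6 and 7 are left untouched.
 */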
void
iounmap (volatile void __iomem *addr)
{
	if (REGION_NUMBER(addr) == RGN_GATE)
		vunmap((void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);