/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/device.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/ioremap.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	return (__force void *)ioremap_cache(offset, size);
}
#endif

#ifndef arch_memremap_can_ram_remap
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
					unsigned long flags)
{
	return true;
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size,
			   unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
	    arch_memremap_can_ram_remap(offset, size, flags))
		return __va(offset);

	return NULL; /* fallback to arch_memremap_wb */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
 *	   MEMREMAP_ENC, MEMREMAP_DEC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
 * uncached. Attempts to map System RAM with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (!flags)
		return NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map.  Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size, flags);
		if (!addr)
			addr = arch_memremap_wb(offset, size);
	}

	/*
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
	 * address mapping.  Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT))
		addr = ioremap_wt(offset, size);

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);

	return addr;
}
EXPORT_SYMBOL(memremap);

void memunmap(void *addr)
{
	if (is_ioremap_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
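
/*
 * Illustrative caller sketch (compiled out): a hypothetical platform driver
 * mapping a region of side-effect-free memory with devm_memremap().  The
 * driver and function names below are made up for the example; only the
 * memremap flags and the devm_memremap()/IS_ERR() calling convention shown
 * here come from the interfaces defined above.
 */
#if 0
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/err.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/*
	 * Ask for a write-back mapping first; if that cannot be
	 * established, fall back to write-combine.  The mapping is
	 * released automatically on driver detach (devres).
	 */
	base = devm_memremap(&pdev->dev, res->start, resource_size(res),
			     MEMREMAP_WB | MEMREMAP_WC);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* base is a plain pointer (no __iomem), so memset()/memcpy() apply */
	memset(base, 0, resource_size(res));

	return 0;
}
#endif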