/*
 * Coherency fabric (Aurora) support for Armada 370, 375, 38x and XP
 * platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Yehuda Yitschak <yehuday@marvell.com>
 * Gregory Clement <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * The Armada 370, 375, 38x and XP SoCs have a coherency fabric which is
 * responsible for ensuring hardware coherency between all CPUs and
 * between CPUs and I/O masters. This file initializes the coherency
 * fabric and supplies basic routines for configuring and controlling
 * hardware coherency.
 */

#define pr_fmt(fmt) "mvebu-coherency: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mbus.h>
#include <linux/pci.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/mach/map.h>
#include "coherency.h"
#include "mvebu-soc-id.h"

unsigned long coherency_phys_base;
void __iomem *coherency_base;
static void __iomem *coherency_cpu_base;

/* Coherency fabric registers */
#define IO_SYNC_BARRIER_CTL_OFFSET	0x0

enum {
	COHERENCY_FABRIC_TYPE_NONE,
	COHERENCY_FABRIC_TYPE_ARMADA_370_XP,
	COHERENCY_FABRIC_TYPE_ARMADA_375,
	COHERENCY_FABRIC_TYPE_ARMADA_380,
};

static const struct of_device_id of_coherency_table[] = {
	{.compatible = "marvell,coherency-fabric",
	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP },
	{.compatible = "marvell,armada-375-coherency-fabric",
	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_375 },
	{.compatible = "marvell,armada-380-coherency-fabric",
	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_380 },
	{ /* end of list */ },
};

/* Functions defined in coherency_ll.S */
int ll_enable_coherency(void);
void ll_add_cpu_to_smp_group(void);

int set_cpu_coherent(void)
{
	if (!coherency_base) {
		pr_warn("Can't make current CPU cache coherent: coherency fabric is not initialized\n");
		return 1;
	}

	ll_add_cpu_to_smp_group();
	return ll_enable_coherency();
}

static inline void mvebu_hwcc_sync_io_barrier(void)
{
	/* Trigger the barrier, then poll until the hardware clears the bit */
	writel(0x1, coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET);
	while (readl(coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET) & 0x1);
}

static dma_addr_t mvebu_hwcc_dma_map_page(struct device *dev, struct page *page,
					  unsigned long offset, size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	if (dir != DMA_TO_DEVICE)
		mvebu_hwcc_sync_io_barrier();
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static void mvebu_hwcc_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
				      size_t size, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	if (dir != DMA_TO_DEVICE)
		mvebu_hwcc_sync_io_barrier();
}

static void mvebu_hwcc_dma_sync(struct device *dev, dma_addr_t dma_handle,
				size_t size, enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)
		mvebu_hwcc_sync_io_barrier();
}
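/*
 * Illustrative sketch, not part of the driver flow: a device whose
 * dma_map_ops point at mvebu_hwcc_dma_ops (see below) reaches the
 * hooks above through the regular streaming DMA API. For example,
 * handing a buffer back to the CPU after the device has written to it:
 *
 *	dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
 *
 * lands in mvebu_hwcc_dma_sync() and triggers the I/O synchronization
 * barrier. The assumption behind skipping the barrier for
 * DMA_TO_DEVICE is that device reads snoop the CPU caches through the
 * coherency fabric, so only device-to-memory writes need to be made
 * visible before the CPU looks at the buffer.
 */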
static struct dma_map_ops mvebu_hwcc_dma_ops = {
	.alloc = arm_dma_alloc,
	.free = arm_dma_free,
	.mmap = arm_dma_mmap,
	.map_page = mvebu_hwcc_dma_map_page,
	.unmap_page = mvebu_hwcc_dma_unmap_page,
	.get_sgtable = arm_dma_get_sgtable,
	.map_sg = arm_dma_map_sg,
	.unmap_sg = arm_dma_unmap_sg,
	.sync_single_for_cpu = mvebu_hwcc_dma_sync,
	.sync_single_for_device = mvebu_hwcc_dma_sync,
	.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device = arm_dma_sync_sg_for_device,
	.set_dma_mask = arm_dma_set_mask,
};

static int mvebu_hwcc_notifier(struct notifier_block *nb,
			       unsigned long event, void *__dev)
{
	struct device *dev = __dev;

	if (event != BUS_NOTIFY_ADD_DEVICE)
		return NOTIFY_DONE;
	set_dma_ops(dev, &mvebu_hwcc_dma_ops);

	return NOTIFY_OK;
}

static struct notifier_block mvebu_hwcc_nb = {
	.notifier_call = mvebu_hwcc_notifier,
};

static struct notifier_block mvebu_hwcc_pci_nb = {
	.notifier_call = mvebu_hwcc_notifier,
};

static void __init armada_370_coherency_init(struct device_node *np)
{
	struct resource res;

	of_address_to_resource(np, 0, &res);
	coherency_phys_base = res.start;
	/*
	 * Ensure secondary CPUs will see the updated value,
	 * which they read before they join the coherency
	 * fabric, and therefore before they are coherent with
	 * the boot CPU cache.
	 */
	sync_cache_w(&coherency_phys_base);
	coherency_base = of_iomap(np, 0);
	coherency_cpu_base = of_iomap(np, 1);
	set_cpu_coherent();
}

/*
 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
 * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
 * is needed as a workaround for a deadlock issue between the PCIe
 * interface and the cache controller.
 */
static void __iomem *
armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
			      unsigned int mtype, void *caller)
{
	struct resource pcie_mem;

	mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);

	if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
		mtype = MT_UNCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype, caller);
}

static void __init armada_375_380_coherency_init(struct device_node *np)
{
	struct device_node *cache_dn;

	coherency_cpu_base = of_iomap(np, 0);
	arch_ioremap_caller = armada_pcie_wa_ioremap_caller;

	/*
	 * We should switch the PL310 to I/O coherency mode only if
	 * I/O coherency is actually enabled.
	 */
	if (!coherency_available())
		return;

	/*
	 * Add the PL310 property "arm,io-coherent". This makes sure the
	 * outer sync operation is not used, which allows us to work
	 * around the system erratum that causes deadlocks when doing
	 * PCIe in an SMP situation on Armada 375 and Armada 38x.
	 */
	for_each_compatible_node(cache_dn, NULL, "arm,pl310-cache") {
		struct property *p;

		p = kzalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			continue;
		p->name = kstrdup("arm,io-coherent", GFP_KERNEL);
		of_add_property(cache_dn, p);
	}
}
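/*
 * For reference, the loop above has the same effect as declaring the
 * boolean property directly in the device tree. A hypothetical PL310
 * node (names, addresses and sizes are illustrative, not taken from a
 * real mvebu .dts) would look like:
 *
 *	l2-cache-controller@8000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0x8000 0x1000>;
 *		cache-unified;
 *		cache-level = <2>;
 *		arm,io-coherent;
 *	};
 */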
static int coherency_type(void)
{
	struct device_node *np;
	const struct of_device_id *match;
	int type;

	/*
	 * The coherency fabric is needed:
	 * - For coherency between processors on Armada XP, so only
	 *   when SMP is enabled.
	 * - For coherency between the processor and I/O devices, but
	 *   this coherency requires many pre-requisites (write
	 *   allocate cache policy, shareable pages, SMP bit set) that
	 *   are only met in SMP situations.
	 *
	 * Note that this means that on Armada 370, there is currently
	 * no way to use hardware I/O coherency, because even when
	 * CONFIG_SMP is enabled, is_smp() returns false due to the
	 * Armada 370 being a single-core processor. To lift this
	 * limitation, we would have to find a way to set the cache
	 * policy to write-allocate (on all Armada SoCs), and to set
	 * the shareable attribute in page tables (on all Armada SoCs
	 * except the Armada 370). Unfortunately, such decisions are
	 * taken very early in the kernel boot process, at a point
	 * where we don't yet know which SoC we are running on.
	 */
	if (!is_smp())
		return COHERENCY_FABRIC_TYPE_NONE;

	np = of_find_matching_node_and_match(NULL, of_coherency_table, &match);
	if (!np)
		return COHERENCY_FABRIC_TYPE_NONE;

	type = (int) match->data;

	of_node_put(np);

	return type;
}

/*
 * As a precaution, we currently completely disable hardware I/O
 * coherency, until enough testing is done with automatic I/O
 * synchronization barriers to validate that it is a proper solution.
 */
int coherency_available(void)
{
	return false;
}

int __init coherency_init(void)
{
	int type = coherency_type();
	struct device_node *np;

	np = of_find_matching_node(NULL, of_coherency_table);

	if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP)
		armada_370_coherency_init(np);
	else if (type == COHERENCY_FABRIC_TYPE_ARMADA_375 ||
		 type == COHERENCY_FABRIC_TYPE_ARMADA_380)
		armada_375_380_coherency_init(np);

	of_node_put(np);

	return 0;
}

static int __init coherency_late_init(void)
{
	if (coherency_available())
		bus_register_notifier(&platform_bus_type,
				      &mvebu_hwcc_nb);
	return 0;
}

postcore_initcall(coherency_late_init);

#if IS_ENABLED(CONFIG_PCI)
static int __init coherency_pci_init(void)
{
	if (coherency_available())
		bus_register_notifier(&pci_bus_type,
				      &mvebu_hwcc_pci_nb);
	return 0;
}

arch_initcall(coherency_pci_init);
#endif
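/*
 * Illustrative only: were coherency_available() to return true, any
 * platform or PCI device added after the notifiers above are
 * registered would transparently get mvebu_hwcc_dma_ops via
 * set_dma_ops(). A plain streaming mapping in such a driver, e.g.:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
 *			      DMA_FROM_DEVICE);
 *	...
 *	dma_unmap_page(&pdev->dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
 *
 * would then go through mvebu_hwcc_dma_map_page() and
 * mvebu_hwcc_dma_unmap_page(), which issue the I/O synchronization
 * barrier for DMA_FROM_DEVICE and DMA_BIDIRECTIONAL transfers instead
 * of performing the per-page CPU cache maintenance done by the default
 * arm_dma_ops.
 */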