xref: /openbmc/linux/arch/arm/mach-mvebu/coherency.c (revision 316fbbc4)
1 /*
2  * Coherency fabric (Aurora) support for Armada 370, 375, 38x and XP
3  * platforms.
4  *
5  * Copyright (C) 2012 Marvell
6  *
7  * Yehuda Yitschak <yehuday@marvell.com>
8  * Gregory Clement <gregory.clement@free-electrons.com>
9  * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
10  *
11  * This file is licensed under the terms of the GNU General Public
12  * License version 2.  This program is licensed "as is" without any
13  * warranty of any kind, whether express or implied.
14  *
15  * The Armada 370, 375, 38x and XP SOCs have a coherency fabric which is
16  * responsible for ensuring hardware coherency between all CPUs and between
17  * CPUs and I/O masters. This file initializes the coherency fabric and
18  * supplies basic routines for configuring and controlling hardware coherency
19  */
20 
21 #define pr_fmt(fmt) "mvebu-coherency: " fmt
22 
23 #include <linux/kernel.h>
24 #include <linux/init.h>
25 #include <linux/of_address.h>
26 #include <linux/io.h>
27 #include <linux/smp.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/platform_device.h>
30 #include <linux/slab.h>
31 #include <linux/mbus.h>
32 #include <linux/pci.h>
33 #include <asm/smp_plat.h>
34 #include <asm/cacheflush.h>
35 #include <asm/mach/map.h>
36 #include "coherency.h"
37 #include "mvebu-soc-id.h"
38 
/*
 * Physical base of the coherency fabric registers; read by secondary
 * CPUs (see coherency_ll.S) before they are cache-coherent, hence the
 * sync_cache_w() in armada_370_coherency_init().
 */
unsigned long coherency_phys_base;
/* Virtual mapping of the coherency fabric control registers (bank 0). */
void __iomem *coherency_base;
/* Virtual mapping of the per-CPU coherency registers (bank 1 on 370/XP). */
static void __iomem *coherency_cpu_base;

/* Coherency fabric registers */
#define IO_SYNC_BARRIER_CTL_OFFSET		   0x0
45 
/*
 * Kind of coherency fabric found in the device tree; carried in the
 * .data field of of_coherency_table entries below.
 */
enum {
	COHERENCY_FABRIC_TYPE_NONE,
	COHERENCY_FABRIC_TYPE_ARMADA_370_XP,
	COHERENCY_FABRIC_TYPE_ARMADA_375,
	COHERENCY_FABRIC_TYPE_ARMADA_380,
};
52 
53 static struct of_device_id of_coherency_table[] = {
54 	{.compatible = "marvell,coherency-fabric",
55 	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP },
56 	{.compatible = "marvell,armada-375-coherency-fabric",
57 	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_375 },
58 	{.compatible = "marvell,armada-380-coherency-fabric",
59 	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_380 },
60 	{ /* end of list */ },
61 };
62 
/*
 * Functions defined in coherency_ll.S (low-level assembly: they must be
 * callable before this CPU's caches are coherent).
 */
int ll_enable_coherency(void);
void ll_add_cpu_to_smp_group(void);
66 
67 int set_cpu_coherent(void)
68 {
69 	if (!coherency_base) {
70 		pr_warn("Can't make current CPU cache coherent.\n");
71 		pr_warn("Coherency fabric is not initialized\n");
72 		return 1;
73 	}
74 
75 	ll_add_cpu_to_smp_group();
76 	return ll_enable_coherency();
77 }
78 
/*
 * Trigger an I/O sync barrier in the coherency fabric and busy-wait
 * until the hardware clears the trigger bit, i.e. until all outstanding
 * I/O transactions have been pushed to a coherent point. Called on the
 * non-DMA_TO_DEVICE paths of the hwcc DMA ops below.
 */
static inline void mvebu_hwcc_sync_io_barrier(void)
{
	writel(0x1, coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET);
	/* Poll until the fabric acknowledges completion by clearing bit 0. */
	while (readl(coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET) & 0x1);
}
84 
85 static dma_addr_t mvebu_hwcc_dma_map_page(struct device *dev, struct page *page,
86 				  unsigned long offset, size_t size,
87 				  enum dma_data_direction dir,
88 				  struct dma_attrs *attrs)
89 {
90 	if (dir != DMA_TO_DEVICE)
91 		mvebu_hwcc_sync_io_barrier();
92 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
93 }
94 
95 
96 static void mvebu_hwcc_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
97 			      size_t size, enum dma_data_direction dir,
98 			      struct dma_attrs *attrs)
99 {
100 	if (dir != DMA_TO_DEVICE)
101 		mvebu_hwcc_sync_io_barrier();
102 }
103 
104 static void mvebu_hwcc_dma_sync(struct device *dev, dma_addr_t dma_handle,
105 			size_t size, enum dma_data_direction dir)
106 {
107 	if (dir != DMA_TO_DEVICE)
108 		mvebu_hwcc_sync_io_barrier();
109 }
110 
/*
 * DMA ops installed on devices behind the coherency fabric: allocation,
 * mmap and scatter-gather paths reuse the generic ARM implementations,
 * while the single-buffer map/unmap/sync paths are replaced with the
 * barrier-only variants above (no CPU cache maintenance needed).
 */
static struct dma_map_ops mvebu_hwcc_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.map_page		= mvebu_hwcc_dma_map_page,
	.unmap_page		= mvebu_hwcc_dma_unmap_page,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= mvebu_hwcc_dma_sync,
	.sync_single_for_device	= mvebu_hwcc_dma_sync,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
126 
127 static int mvebu_hwcc_notifier(struct notifier_block *nb,
128 			       unsigned long event, void *__dev)
129 {
130 	struct device *dev = __dev;
131 
132 	if (event != BUS_NOTIFY_ADD_DEVICE)
133 		return NOTIFY_DONE;
134 	set_dma_ops(dev, &mvebu_hwcc_dma_ops);
135 
136 	return NOTIFY_OK;
137 }
138 
/* Notifier for the platform bus (registered in coherency_late_init()). */
static struct notifier_block mvebu_hwcc_nb = {
	.notifier_call = mvebu_hwcc_notifier,
};

/*
 * Separate notifier instance for the PCI bus (registered in
 * coherency_pci_init()); same callback, but a notifier_block cannot be
 * shared across two notifier chains.
 */
static struct notifier_block mvebu_hwcc_pci_nb = {
	.notifier_call = mvebu_hwcc_notifier,
};
146 
147 static void __init armada_370_coherency_init(struct device_node *np)
148 {
149 	struct resource res;
150 
151 	of_address_to_resource(np, 0, &res);
152 	coherency_phys_base = res.start;
153 	/*
154 	 * Ensure secondary CPUs will see the updated value,
155 	 * which they read before they join the coherency
156 	 * fabric, and therefore before they are coherent with
157 	 * the boot CPU cache.
158 	 */
159 	sync_cache_w(&coherency_phys_base);
160 	coherency_base = of_iomap(np, 0);
161 	coherency_cpu_base = of_iomap(np, 1);
162 	set_cpu_coherent();
163 }
164 
165 /*
166  * This ioremap hook is used on Armada 375/38x to ensure that PCIe
167  * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
168  * is needed as a workaround for a deadlock issue between the PCIe
169  * interface and the cache controller.
170  */
171 static void __iomem *
172 armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
173 			      unsigned int mtype, void *caller)
174 {
175 	struct resource pcie_mem;
176 
177 	mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
178 
179 	if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
180 		mtype = MT_UNCACHED;
181 
182 	return __arm_ioremap_caller(phys_addr, size, mtype, caller);
183 }
184 
185 static void __init armada_375_380_coherency_init(struct device_node *np)
186 {
187 	struct device_node *cache_dn;
188 
189 	coherency_cpu_base = of_iomap(np, 0);
190 	arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
191 
192 	/*
193 	 * Add the PL310 property "arm,io-coherent". This makes sure the
194 	 * outer sync operation is not used, which allows to
195 	 * workaround the system erratum that causes deadlocks when
196 	 * doing PCIe in an SMP situation on Armada 375 and Armada
197 	 * 38x.
198 	 */
199 	for_each_compatible_node(cache_dn, NULL, "arm,pl310-cache") {
200 		struct property *p;
201 
202 		p = kzalloc(sizeof(*p), GFP_KERNEL);
203 		p->name = kstrdup("arm,io-coherent", GFP_KERNEL);
204 		of_add_property(cache_dn, p);
205 	}
206 }
207 
208 static int coherency_type(void)
209 {
210 	struct device_node *np;
211 	const struct of_device_id *match;
212 	int type;
213 
214 	/*
215 	 * The coherency fabric is needed:
216 	 * - For coherency between processors on Armada XP, so only
217 	 *   when SMP is enabled.
218 	 * - For coherency between the processor and I/O devices, but
219 	 *   this coherency requires many pre-requisites (write
220 	 *   allocate cache policy, shareable pages, SMP bit set) that
221 	 *   are only meant in SMP situations.
222 	 *
223 	 * Note that this means that on Armada 370, there is currently
224 	 * no way to use hardware I/O coherency, because even when
225 	 * CONFIG_SMP is enabled, is_smp() returns false due to the
226 	 * Armada 370 being a single-core processor. To lift this
227 	 * limitation, we would have to find a way to make the cache
228 	 * policy set to write-allocate (on all Armada SoCs), and to
229 	 * set the shareable attribute in page tables (on all Armada
230 	 * SoCs except the Armada 370). Unfortunately, such decisions
231 	 * are taken very early in the kernel boot process, at a point
232 	 * where we don't know yet on which SoC we are running.
233 
234 	 */
235 	if (!is_smp())
236 		return COHERENCY_FABRIC_TYPE_NONE;
237 
238 	np = of_find_matching_node_and_match(NULL, of_coherency_table, &match);
239 	if (!np)
240 		return COHERENCY_FABRIC_TYPE_NONE;
241 
242 	type = (int) match->data;
243 
244 	of_node_put(np);
245 
246 	return type;
247 }
248 
249 int coherency_available(void)
250 {
251 	return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
252 }
253 
254 int __init coherency_init(void)
255 {
256 	int type = coherency_type();
257 	struct device_node *np;
258 
259 	np = of_find_matching_node(NULL, of_coherency_table);
260 
261 	if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP)
262 		armada_370_coherency_init(np);
263 	else if (type == COHERENCY_FABRIC_TYPE_ARMADA_375 ||
264 		 type == COHERENCY_FABRIC_TYPE_ARMADA_380)
265 		armada_375_380_coherency_init(np);
266 
267 	of_node_put(np);
268 
269 	return 0;
270 }
271 
272 static int __init coherency_late_init(void)
273 {
274 	if (coherency_available())
275 		bus_register_notifier(&platform_bus_type,
276 				      &mvebu_hwcc_nb);
277 	return 0;
278 }
279 
280 postcore_initcall(coherency_late_init);
281 
282 #if IS_ENABLED(CONFIG_PCI)
283 static int __init coherency_pci_init(void)
284 {
285 	if (coherency_available())
286 		bus_register_notifier(&pci_bus_type,
287 				       &mvebu_hwcc_pci_nb);
288 	return 0;
289 }
290 
291 arch_initcall(coherency_pci_init);
292 #endif
293