xref: /openbmc/linux/arch/arm/mach-mvebu/coherency.c (revision b34e08d5)
1 /*
2  * Coherency fabric (Aurora) support for Armada 370 and XP platforms.
3  *
4  * Copyright (C) 2012 Marvell
5  *
6  * Yehuda Yitschak <yehuday@marvell.com>
7  * Gregory Clement <gregory.clement@free-electrons.com>
8  * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
9  *
10  * This file is licensed under the terms of the GNU General Public
11  * License version 2.  This program is licensed "as is" without any
12  * warranty of any kind, whether express or implied.
13  *
14  * The Armada 370 and Armada XP SOCs have a coherency fabric which is
15  * responsible for ensuring hardware coherency between all CPUs and between
16  * CPUs and I/O masters. This file initializes the coherency fabric and
17  * supplies basic routines for configuring and controlling hardware coherency
18  */
19 
20 #include <linux/kernel.h>
21 #include <linux/init.h>
22 #include <linux/of_address.h>
23 #include <linux/io.h>
24 #include <linux/smp.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/platform_device.h>
27 #include <asm/smp_plat.h>
28 #include <asm/cacheflush.h>
29 #include "armada-370-xp.h"
30 #include "coherency.h"
31 
32 /* Physical base of the coherency fabric; read by secondary CPUs before
33  * they join the fabric (see the sync_cache_w() note in coherency_init). */
34 unsigned long coherency_phys_base;
33 /* Virtual mapping of the coherency fabric registers (DT "reg" index 0) */
34 static void __iomem *coherency_base;
34 /* Virtual mapping of the per-CPU coherency registers (DT "reg" index 1) */
35 static void __iomem *coherency_cpu_base;
35 
36 /* Coherency fabric registers */
37 #define COHERENCY_FABRIC_CFG_OFFSET		   0x4
38 
39 #define IO_SYNC_BARRIER_CTL_OFFSET		   0x0
40 
41 /*
42  * DT match table for the coherency fabric node. It is only referenced
43  * from __init code (coherency_init / coherency_late_init), so it can
44  * live in init memory and be const.
45  */
46 static const struct of_device_id of_coherency_table[] __initconst = {
47 	{.compatible = "marvell,coherency-fabric"},
48 	{ /* end of list */ },
49 };
45 
46 /* Function defined in coherency_ll.S */
47 int ll_set_cpu_coherent(void __iomem *base_addr, unsigned int hw_cpu_id);
48 
49 /*
50  * Add the CPU with hardware id @hw_cpu_id to the coherency fabric.
51  *
52  * Returns the result of ll_set_cpu_coherent(), or a non-zero error if
53  * the fabric registers have not been mapped yet (coherency_init not
54  * run, or no fabric node in the device tree).
55  */
56 int set_cpu_coherent(unsigned int hw_cpu_id, int smp_group_id)
57 {
58 	if (coherency_base)
59 		return ll_set_cpu_coherent(coherency_base, hw_cpu_id);
60 
61 	pr_warn("Can't make CPU %d cache coherent.\n", hw_cpu_id);
62 	pr_warn("Coherency fabric is not initialized\n");
63 	return 1;
64 }
59 
60 /*
61  * Trigger an I/O sync barrier by setting bit 0 of the barrier control
62  * register, then spin until the hardware clears it, indicating the
63  * barrier has completed.
64  */
65 static inline void mvebu_hwcc_sync_io_barrier(void)
66 {
67 	writel(0x1, coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET);
68 	while (readl(coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET) & 0x1);
69 }
65 
66 /*
67  * map_page callback for hardware-coherent DMA: no cache maintenance is
68  * performed. When the device may write to memory (any direction other
69  * than DMA_TO_DEVICE), an I/O sync barrier is issued first; the DMA
70  * address is simply the page's bus address plus the offset.
71  */
72 static dma_addr_t mvebu_hwcc_dma_map_page(struct device *dev, struct page *page,
73 				  unsigned long offset, size_t size,
74 				  enum dma_data_direction dir,
75 				  struct dma_attrs *attrs)
76 {
77 	if (dir != DMA_TO_DEVICE)
78 		mvebu_hwcc_sync_io_barrier();
79 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
80 }
75 
76 
77 /*
78  * unmap_page callback: only issues an I/O sync barrier for transfers
79  * where the device may have written to memory; no cache maintenance.
80  */
81 static void mvebu_hwcc_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
82 			      size_t size, enum dma_data_direction dir,
83 			      struct dma_attrs *attrs)
84 {
85 	if (dir != DMA_TO_DEVICE)
86 		mvebu_hwcc_sync_io_barrier();
87 }
84 
85 /*
86  * Shared sync_single_for_cpu/for_device callback: an I/O sync barrier
87  * for device-writable transfers is all that is needed on a coherent
88  * fabric.
89  */
90 static void mvebu_hwcc_dma_sync(struct device *dev, dma_addr_t dma_handle,
91 			size_t size, enum dma_data_direction dir)
92 {
93 	if (dir != DMA_TO_DEVICE)
94 		mvebu_hwcc_sync_io_barrier();
95 }
91 
92 /*
93  * DMA ops installed on platform devices when hardware I/O coherency is
94  * available: alloc/free/mmap/sg paths reuse the standard ARM DMA ops,
95  * while the per-page map/unmap/sync callbacks are replaced with the
96  * barrier-only variants above.
97  */
98 static struct dma_map_ops mvebu_hwcc_dma_ops = {
99 	.alloc			= arm_dma_alloc,
100 	.free			= arm_dma_free,
101 	.mmap			= arm_dma_mmap,
102 	.map_page		= mvebu_hwcc_dma_map_page,
103 	.unmap_page		= mvebu_hwcc_dma_unmap_page,
104 	.get_sgtable		= arm_dma_get_sgtable,
105 	.map_sg			= arm_dma_map_sg,
106 	.unmap_sg		= arm_dma_unmap_sg,
107 	.sync_single_for_cpu	= mvebu_hwcc_dma_sync,
108 	.sync_single_for_device	= mvebu_hwcc_dma_sync,
109 	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
110 	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
111 	.set_dma_mask		= arm_dma_set_mask,
112 };
107 
108 /*
109  * Platform bus notifier: install the HW-coherency DMA ops on every
110  * newly added platform device; all other bus events are ignored.
111  */
112 static int mvebu_hwcc_platform_notifier(struct notifier_block *nb,
113 				       unsigned long event, void *__dev)
114 {
115 	struct device *dev = __dev;
116 
117 	if (event == BUS_NOTIFY_ADD_DEVICE) {
118 		set_dma_ops(dev, &mvebu_hwcc_dma_ops);
119 		return NOTIFY_OK;
120 	}
121 
122 	return NOTIFY_DONE;
123 }
119 
120 /* Notifier block registered on the platform bus by coherency_late_init() */
121 static struct notifier_block mvebu_hwcc_platform_nb = {
122 	.notifier_call = mvebu_hwcc_platform_notifier,
123 };
123 
124 /*
125  * Locate the coherency fabric node in the device tree, record and map
126  * its registers, and join the boot CPU to the fabric.
127  *
128  * Always returns 0, including when no fabric node is present (in which
129  * case coherency_base stays NULL and set_cpu_coherent() later warns).
130  */
131 int __init coherency_init(void)
132 {
133 	struct device_node *np;
134 
135 	np = of_find_matching_node(NULL, of_coherency_table);
136 	if (np) {
137 		struct resource res;
138 		pr_info("Initializing Coherency fabric\n");
139 		/*
140 		 * NOTE(review): the return values of of_address_to_resource()
141 		 * and of_iomap() below are unchecked; with a malformed DT,
142 		 * res.start may be garbage and the iomem pointers NULL —
143 		 * worth verifying against the binding.
144 		 */
145 		of_address_to_resource(np, 0, &res);
146 		coherency_phys_base = res.start;
147 		/*
148 		 * Ensure secondary CPUs will see the updated value,
149 		 * which they read before they join the coherency
150 		 * fabric, and therefore before they are coherent with
151 		 * the boot CPU cache.
152 		 */
153 		sync_cache_w(&coherency_phys_base);
154 		coherency_base = of_iomap(np, 0);
155 		coherency_cpu_base = of_iomap(np, 1);
156 		set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
157 		of_node_put(np);
158 	}
159 
160 	return 0;
161 }
149 
150 /*
151  * Late init: if a coherency fabric node exists, hook the platform bus
152  * so every platform device added afterwards gets the HW-coherency DMA
153  * ops. Always returns 0.
154  */
155 static int __init coherency_late_init(void)
156 {
157 	struct device_node *np = of_find_matching_node(NULL, of_coherency_table);
158 
159 	if (!np)
160 		return 0;
161 
162 	bus_register_notifier(&platform_bus_type, &mvebu_hwcc_platform_nb);
163 	of_node_put(np);
164 	return 0;
165 }
166 
167 postcore_initcall(coherency_late_init);
164