1 /* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2  *
3  * Redistribution and use in source and binary forms, with or without
4  * modification, are permitted provided that the following conditions are met:
5  *     * Redistributions of source code must retain the above copyright
6  *	 notice, this list of conditions and the following disclaimer.
7  *     * Redistributions in binary form must reproduce the above copyright
8  *	 notice, this list of conditions and the following disclaimer in the
9  *	 documentation and/or other materials provided with the distribution.
10  *     * Neither the name of Freescale Semiconductor nor the
11  *	 names of its contributors may be used to endorse or promote products
12  *	 derived from this software without specific prior written permission.
13  *
14  * ALTERNATIVELY, this software may be distributed under the terms of the
15  * GNU General Public License ("GPL") as published by the Free Software
16  * Foundation, either version 2 of that License or (at your option) any
17  * later version.
18  *
19  * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22  * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include "qman_priv.h"
32 
/* First affine portal brought up; exported for users needing a portal for DMA */
struct qman_portal *qman_dma_portal;
EXPORT_SYMBOL(qman_dma_portal);

/* Enable portal interrupts (as opposed to polling mode) */
#define CONFIG_FSL_DPA_PIRQ_SLOW  1
#define CONFIG_FSL_DPA_PIRQ_FAST  1

/* CPUs that have been assigned an affine portal so far (guarded by qman_lock) */
static struct cpumask portal_cpus;
/* protect qman global registers and global data shared among portals */
static DEFINE_SPINLOCK(qman_lock);
43 
44 static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
45 {
46 #ifdef CONFIG_FSL_PAMU
47 	struct device *dev = pcfg->dev;
48 	int window_count = 1;
49 	struct iommu_domain_geometry geom_attr;
50 	struct pamu_stash_attribute stash_attr;
51 	int ret;
52 
53 	pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
54 	if (!pcfg->iommu_domain) {
55 		dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
56 		goto no_iommu;
57 	}
58 	geom_attr.aperture_start = 0;
59 	geom_attr.aperture_end =
60 		((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
61 	geom_attr.force_aperture = true;
62 	ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
63 				    &geom_attr);
64 	if (ret < 0) {
65 		dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
66 			ret);
67 		goto out_domain_free;
68 	}
69 	ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
70 				    &window_count);
71 	if (ret < 0) {
72 		dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
73 			ret);
74 		goto out_domain_free;
75 	}
76 	stash_attr.cpu = cpu;
77 	stash_attr.cache = PAMU_ATTR_CACHE_L1;
78 	ret = iommu_domain_set_attr(pcfg->iommu_domain,
79 				    DOMAIN_ATTR_FSL_PAMU_STASH,
80 				    &stash_attr);
81 	if (ret < 0) {
82 		dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
83 			__func__, ret);
84 		goto out_domain_free;
85 	}
86 	ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
87 					 IOMMU_READ | IOMMU_WRITE);
88 	if (ret < 0) {
89 		dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
90 			__func__, ret);
91 		goto out_domain_free;
92 	}
93 	ret = iommu_attach_device(pcfg->iommu_domain, dev);
94 	if (ret < 0) {
95 		dev_err(dev, "%s(): iommu_device_attach() = %d", __func__,
96 			ret);
97 		goto out_domain_free;
98 	}
99 	ret = iommu_domain_set_attr(pcfg->iommu_domain,
100 				    DOMAIN_ATTR_FSL_PAMU_ENABLE,
101 				    &window_count);
102 	if (ret < 0) {
103 		dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
104 			ret);
105 		goto out_detach_device;
106 	}
107 
108 no_iommu:
109 #endif
110 	qman_set_sdest(pcfg->channel, cpu);
111 
112 	return;
113 
114 #ifdef CONFIG_FSL_PAMU
115 out_detach_device:
116 	iommu_detach_device(pcfg->iommu_domain, NULL);
117 out_domain_free:
118 	iommu_domain_free(pcfg->iommu_domain);
119 	pcfg->iommu_domain = NULL;
120 #endif
121 }
122 
123 static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
124 {
125 	struct qman_portal *p;
126 	u32 irq_sources = 0;
127 
128 	/* We need the same LIODN offset for all portals */
129 	qman_liodn_fixup(pcfg->channel);
130 
131 	pcfg->iommu_domain = NULL;
132 	portal_set_cpu(pcfg, pcfg->cpu);
133 
134 	p = qman_create_affine_portal(pcfg, NULL);
135 	if (!p) {
136 		dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
137 			 __func__, pcfg->cpu);
138 		return NULL;
139 	}
140 
141 	/* Determine what should be interrupt-vs-poll driven */
142 #ifdef CONFIG_FSL_DPA_PIRQ_SLOW
143 	irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
144 		       QM_PIRQ_CSCI;
145 #endif
146 #ifdef CONFIG_FSL_DPA_PIRQ_FAST
147 	irq_sources |= QM_PIRQ_DQRI;
148 #endif
149 	qman_p_irqsource_add(p, irq_sources);
150 
151 	spin_lock(&qman_lock);
152 	if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
153 		/* all assigned portals are initialized now */
154 		qman_init_cgr_all();
155 	}
156 
157 	if (!qman_dma_portal)
158 		qman_dma_portal = p;
159 
160 	spin_unlock(&qman_lock);
161 
162 	dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
163 
164 	return p;
165 }
166 
167 static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
168 							unsigned int cpu)
169 {
170 #ifdef CONFIG_FSL_PAMU /* TODO */
171 	struct pamu_stash_attribute stash_attr;
172 	int ret;
173 
174 	if (pcfg->iommu_domain) {
175 		stash_attr.cpu = cpu;
176 		stash_attr.cache = PAMU_ATTR_CACHE_L1;
177 		ret = iommu_domain_set_attr(pcfg->iommu_domain,
178 				DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
179 		if (ret < 0) {
180 			dev_err(pcfg->dev,
181 				"Failed to update pamu stash setting\n");
182 			return;
183 		}
184 	}
185 #endif
186 	qman_set_sdest(pcfg->channel, cpu);
187 }
188 
189 static int qman_offline_cpu(unsigned int cpu)
190 {
191 	struct qman_portal *p;
192 	const struct qm_portal_config *pcfg;
193 
194 	p = affine_portals[cpu];
195 	if (p) {
196 		pcfg = qman_get_qm_portal_config(p);
197 		if (pcfg) {
198 			/* select any other online CPU */
199 			cpu = cpumask_any_but(cpu_online_mask, cpu);
200 			irq_set_affinity(pcfg->irq, cpumask_of(cpu));
201 			qman_portal_update_sdest(pcfg, cpu);
202 		}
203 	}
204 	return 0;
205 }
206 
207 static int qman_online_cpu(unsigned int cpu)
208 {
209 	struct qman_portal *p;
210 	const struct qm_portal_config *pcfg;
211 
212 	p = affine_portals[cpu];
213 	if (p) {
214 		pcfg = qman_get_qm_portal_config(p);
215 		if (pcfg) {
216 			irq_set_affinity(pcfg->irq, cpumask_of(cpu));
217 			qman_portal_update_sdest(pcfg, cpu);
218 		}
219 	}
220 	return 0;
221 }
222 
223 static int qman_portal_probe(struct platform_device *pdev)
224 {
225 	struct device *dev = &pdev->dev;
226 	struct device_node *node = dev->of_node;
227 	struct qm_portal_config *pcfg;
228 	struct resource *addr_phys[2];
229 	int irq, cpu, err;
230 	u32 val;
231 
232 	err = qman_is_probed();
233 	if (!err)
234 		return -EPROBE_DEFER;
235 	if (err < 0) {
236 		dev_err(&pdev->dev, "failing probe due to qman probe error\n");
237 		return -ENODEV;
238 	}
239 
240 	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
241 	if (!pcfg)
242 		return -ENOMEM;
243 
244 	pcfg->dev = dev;
245 
246 	addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
247 					     DPAA_PORTAL_CE);
248 	if (!addr_phys[0]) {
249 		dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
250 		return -ENXIO;
251 	}
252 
253 	addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
254 					     DPAA_PORTAL_CI);
255 	if (!addr_phys[1]) {
256 		dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
257 		return -ENXIO;
258 	}
259 
260 	err = of_property_read_u32(node, "cell-index", &val);
261 	if (err) {
262 		dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
263 		return err;
264 	}
265 	pcfg->channel = val;
266 	pcfg->cpu = -1;
267 	irq = platform_get_irq(pdev, 0);
268 	if (irq <= 0) {
269 		dev_err(dev, "Can't get %pOF IRQ\n", node);
270 		return -ENXIO;
271 	}
272 	pcfg->irq = irq;
273 
274 	pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
275 					resource_size(addr_phys[0]),
276 					QBMAN_MEMREMAP_ATTR);
277 	if (!pcfg->addr_virt_ce) {
278 		dev_err(dev, "memremap::CE failed\n");
279 		goto err_ioremap1;
280 	}
281 
282 	pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
283 				resource_size(addr_phys[1]));
284 	if (!pcfg->addr_virt_ci) {
285 		dev_err(dev, "ioremap::CI failed\n");
286 		goto err_ioremap2;
287 	}
288 
289 	pcfg->pools = qm_get_pools_sdqcr();
290 
291 	spin_lock(&qman_lock);
292 	cpu = cpumask_next_zero(-1, &portal_cpus);
293 	if (cpu >= nr_cpu_ids) {
294 		/* unassigned portal, skip init */
295 		spin_unlock(&qman_lock);
296 		return 0;
297 	}
298 
299 	cpumask_set_cpu(cpu, &portal_cpus);
300 	spin_unlock(&qman_lock);
301 	pcfg->cpu = cpu;
302 
303 	if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
304 		dev_err(dev, "dma_set_mask() failed\n");
305 		goto err_portal_init;
306 	}
307 
308 	if (!init_pcfg(pcfg)) {
309 		dev_err(dev, "portal init failed\n");
310 		goto err_portal_init;
311 	}
312 
313 	/* clear irq affinity if assigned cpu is offline */
314 	if (!cpu_online(cpu))
315 		qman_offline_cpu(cpu);
316 
317 	return 0;
318 
319 err_portal_init:
320 	iounmap(pcfg->addr_virt_ci);
321 err_ioremap2:
322 	memunmap(pcfg->addr_virt_ce);
323 err_ioremap1:
324 	return -ENXIO;
325 }
326 
/* Device-tree match table: one entry per QMan portal node */
static const struct of_device_id qman_portal_ids[] = {
	{
		.compatible = "fsl,qman-portal",
	},
	{}
};
MODULE_DEVICE_TABLE(of, qman_portal_ids);
334 
/* Platform driver; no .remove — portals stay up for the life of the system */
static struct platform_driver qman_portal_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qman_portal_ids,
	},
	.probe = qman_portal_probe,
};
342 
343 static int __init qman_portal_driver_register(struct platform_driver *drv)
344 {
345 	int ret;
346 
347 	ret = platform_driver_register(drv);
348 	if (ret < 0)
349 		return ret;
350 
351 	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
352 					"soc/qman_portal:online",
353 					qman_online_cpu, qman_offline_cpu);
354 	if (ret < 0) {
355 		pr_err("qman: failed to register hotplug callbacks.\n");
356 		platform_driver_unregister(drv);
357 		return ret;
358 	}
359 	return 0;
360 }
361 
/*
 * Use module_driver() instead of module_platform_driver() so the custom
 * qman_portal_driver_register() above (which also wires up the CPU-hotplug
 * callbacks) runs at module init.
 */
module_driver(qman_portal_driver,
	      qman_portal_driver_register, platform_driver_unregister);
364