/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qman_priv.h"

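/*
 * First portal to complete initialisation; set in init_pcfg() and exported
 * for use by other QMan/DPAA code.
 */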
struct qman_portal *qman_dma_portal;
EXPORT_SYMBOL(qman_dma_portal);

/* Enable portal interrupts (as opposed to polling mode) */
#define CONFIG_FSL_DPA_PIRQ_SLOW  1
#define CONFIG_FSL_DPA_PIRQ_FAST  1

static struct cpumask portal_cpus;
static int __qman_portals_probed;
/* protect qman global registers and global data shared among portals */
static DEFINE_SPINLOCK(qman_lock);

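/*
 * Point dequeue stashing for a portal at the given CPU. With CONFIG_FSL_PAMU
 * this also allocates an IOMMU domain, configures PAMU L1 stashing to target
 * the same CPU and attaches the portal device to the domain.
 */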
static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
{
#ifdef CONFIG_FSL_PAMU
	struct device *dev = pcfg->dev;
	int ret;

	pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
	if (!pcfg->iommu_domain) {
		dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
		goto no_iommu;
	}
	ret = fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu);
	if (ret < 0) {
		dev_err(dev, "%s(): fsl_pamu_configure_l1_stash() = %d",
			__func__, ret);
		goto out_domain_free;
	}
	ret = iommu_attach_device(pcfg->iommu_domain, dev);
	if (ret < 0) {
		dev_err(dev, "%s(): iommu_attach_device() = %d", __func__,
			ret);
		goto out_domain_free;
	}

no_iommu:
#endif
	qman_set_sdest(pcfg->channel, cpu);

	return;

#ifdef CONFIG_FSL_PAMU
out_domain_free:
	iommu_domain_free(pcfg->iommu_domain);
	pcfg->iommu_domain = NULL;
#endif
}

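/*
 * Set up one portal for the CPU recorded in its config: fix up the LIODN,
 * program stashing, create the affine portal and choose which interrupt
 * sources are IRQ-driven rather than polled. Once every possible CPU has been
 * assigned a portal, the congestion groups are initialised as well. The first
 * portal to complete initialisation is also recorded as qman_dma_portal.
 */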
static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
{
	struct qman_portal *p;
	u32 irq_sources = 0;

	/* We need the same LIODN offset for all portals */
	qman_liodn_fixup(pcfg->channel);

	pcfg->iommu_domain = NULL;
	portal_set_cpu(pcfg, pcfg->cpu);

	p = qman_create_affine_portal(pcfg, NULL);
	if (!p) {
		dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
			 __func__, pcfg->cpu);
		return NULL;
	}

	/* Determine what should be interrupt-vs-poll driven */
#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
	irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
		       QM_PIRQ_CSCI;
#endif
#ifdef CONFIG_FSL_DPA_PIRQ_FAST
	irq_sources |= QM_PIRQ_DQRI;
#endif
	qman_p_irqsource_add(p, irq_sources);

	spin_lock(&qman_lock);
	if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
		/* all assigned portals are initialized now */
		qman_init_cgr_all();
	}

	if (!qman_dma_portal)
		qman_dma_portal = p;

	spin_unlock(&qman_lock);

	dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);

	return p;
}

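/*
 * Re-target stashing for an existing portal at a (possibly different) CPU:
 * update the PAMU stash setting if an IOMMU domain is in use, then reprogram
 * the stash destination. Used by the CPU hotplug callbacks below.
 */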
static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
							unsigned int cpu)
{
#ifdef CONFIG_FSL_PAMU /* TODO */
	if (pcfg->iommu_domain) {
		if (fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu) < 0) {
			dev_err(pcfg->dev,
				"Failed to update pamu stash setting\n");
			return;
		}
	}
#endif
	qman_set_sdest(pcfg->channel, cpu);
}

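/*
 * CPU hotplug "offline" callback: if the CPU going down owns an affine
 * portal, migrate the portal interrupt and stash destination to any other
 * online CPU. Also called from the probe path when the assigned CPU is
 * already offline.
 */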
static int qman_offline_cpu(unsigned int cpu)
{
	struct qman_portal *p;
	const struct qm_portal_config *pcfg;

	p = affine_portals[cpu];
	if (p) {
		pcfg = qman_get_qm_portal_config(p);
		if (pcfg) {
			/* select any other online CPU */
			cpu = cpumask_any_but(cpu_online_mask, cpu);
			irq_set_affinity(pcfg->irq, cpumask_of(cpu));
			qman_portal_update_sdest(pcfg, cpu);
		}
	}
	return 0;
}

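/*
 * CPU hotplug "online" callback: move the portal interrupt affinity and
 * stash destination back to the CPU that has just come online.
 */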
static int qman_online_cpu(unsigned int cpu)
{
	struct qman_portal *p;
	const struct qm_portal_config *pcfg;

	p = affine_portals[cpu];
	if (p) {
		pcfg = qman_get_qm_portal_config(p);
		if (pcfg) {
			irq_set_affinity(pcfg->irq, cpumask_of(cpu));
			qman_portal_update_sdest(pcfg, cpu);
		}
	}
	return 0;
}

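/*
 * Report portal probe status to other DPAA drivers: 1 once every possible
 * CPU has been assigned an initialised portal, -1 if a portal probe failed,
 * 0 while probing is still in progress.
 */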
int qman_portals_probed(void)
{
	return __qman_portals_probed;
}
EXPORT_SYMBOL_GPL(qman_portals_probed);

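/*
 * Platform driver probe: map the portal's cache-enabled (CE) and
 * cache-inhibited (CI) register regions, read the channel from the
 * "cell-index" property, claim the first CPU that does not yet have a portal
 * and initialise an affine portal on it. If QMan was not reset before boot
 * (e.g. after a kexec), all frame queues are also shut down once every CPU
 * has been assigned a portal.
 */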
static int qman_portal_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct qm_portal_config *pcfg;
	struct resource *addr_phys[2];
	int irq, cpu, err, i;
	u32 val;

	err = qman_is_probed();
	if (!err)
		return -EPROBE_DEFER;
	if (err < 0) {
		dev_err(&pdev->dev, "failing probe due to qman probe error\n");
		return -ENODEV;
	}

	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
	if (!pcfg) {
		__qman_portals_probed = -1;
		return -ENOMEM;
	}

	pcfg->dev = dev;

	addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
					     DPAA_PORTAL_CE);
	if (!addr_phys[0]) {
		dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
		goto err_ioremap1;
	}

	addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
					     DPAA_PORTAL_CI);
	if (!addr_phys[1]) {
		dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
		goto err_ioremap1;
	}

	err = of_property_read_u32(node, "cell-index", &val);
	if (err) {
		dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
		__qman_portals_probed = -1;
		return err;
	}
	pcfg->channel = val;
	pcfg->cpu = -1;
	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		goto err_ioremap1;
	pcfg->irq = irq;

	pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
					resource_size(addr_phys[0]),
					QBMAN_MEMREMAP_ATTR);
	if (!pcfg->addr_virt_ce) {
		dev_err(dev, "memremap::CE failed\n");
		goto err_ioremap1;
	}

	pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
				resource_size(addr_phys[1]));
	if (!pcfg->addr_virt_ci) {
		dev_err(dev, "ioremap::CI failed\n");
		goto err_ioremap2;
	}

	pcfg->pools = qm_get_pools_sdqcr();

	spin_lock(&qman_lock);
	cpu = cpumask_first_zero(&portal_cpus);
	if (cpu >= nr_cpu_ids) {
		__qman_portals_probed = 1;
		/* unassigned portal, skip init */
		spin_unlock(&qman_lock);
		goto check_cleanup;
	}

	cpumask_set_cpu(cpu, &portal_cpus);
	spin_unlock(&qman_lock);
	pcfg->cpu = cpu;

	if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
		dev_err(dev, "dma_set_mask() failed\n");
		goto err_portal_init;
	}

	if (!init_pcfg(pcfg)) {
		dev_err(dev, "portal init failed\n");
		goto err_portal_init;
	}

	/* clear irq affinity if assigned cpu is offline */
	if (!cpu_online(cpu))
		qman_offline_cpu(cpu);

check_cleanup:
	if (__qman_portals_probed == 1 && qman_requires_cleanup()) {
		/*
		 * QMan wasn't reset prior to boot (Kexec for example)
		 * Empty all the frame queues so they are in reset state
		 */
		for (i = 0; i < qm_get_fqid_maxcnt(); i++) {
			err = qman_shutdown_fq(i);
			if (err) {
				dev_err(dev, "Failed to shutdown frame queue %d\n",
					i);
				goto err_portal_init;
			}
		}
		qman_done_cleanup();
	}

	return 0;

err_portal_init:
	iounmap(pcfg->addr_virt_ci);
err_ioremap2:
	memunmap(pcfg->addr_virt_ce);
err_ioremap1:
	__qman_portals_probed = -1;

	return -ENXIO;
}

static const struct of_device_id qman_portal_ids[] = {
	{
		.compatible = "fsl,qman-portal",
	},
	{}
};
MODULE_DEVICE_TABLE(of, qman_portal_ids);

static struct platform_driver qman_portal_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qman_portal_ids,
	},
	.probe = qman_portal_probe,
};

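/*
 * Register the platform driver along with the CPU hotplug callbacks that
 * keep portal interrupt affinity and stashing in sync as CPUs go offline
 * and come back online.
 */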
static int __init qman_portal_driver_register(struct platform_driver *drv)
{
	int ret;

	ret = platform_driver_register(drv);
	if (ret < 0)
		return ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"soc/qman_portal:online",
					qman_online_cpu, qman_offline_cpu);
	if (ret < 0) {
		pr_err("qman: failed to register hotplug callbacks.\n");
		platform_driver_unregister(drv);
		return ret;
	}
	return 0;
}

module_driver(qman_portal_driver,
	      qman_portal_driver_register, platform_driver_unregister);