// SPDX-License-Identifier: GPL-2.0
// Copyright 2019 NXP

#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmapool.h>
#include <linux/of_irq.h>
#include <linux/iommu.h>
#include <linux/sys_soc.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>

#include "../virt-dma.h"
#include "dpdmai.h"
#include "dpaa2-qdma.h"

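/*
 * A minimal dmaengine consumer sketch (illustration only, not part of
 * this driver; the helpers below are the generic dmaengine API):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */

/*
 * Frame descriptors bypass memory translation (BMT) unless probe finds
 * an IOMMU domain attached to the DPDMAI device.
 */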
static bool smmu_disable = true;

static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
}

static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dpaa2_qdma_comp, vdesc);
}

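/*
 * Allocate the per-channel DMA pools: one frame descriptor per transfer,
 * a three-entry frame list (descriptor, source, destination) and a
 * source/destination descriptor pair.
 */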
static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
	struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;

	dpaa2_chan->fd_pool = dma_pool_create("fd_pool", dev,
					      sizeof(struct dpaa2_fd),
					      sizeof(struct dpaa2_fd), 0);
	if (!dpaa2_chan->fd_pool)
		goto err;

	dpaa2_chan->fl_pool =
		dma_pool_create("fl_pool", dev,
				sizeof(struct dpaa2_fl_entry) * 3,
				sizeof(struct dpaa2_fl_entry), 0);
	if (!dpaa2_chan->fl_pool)
		goto err_fd;

	dpaa2_chan->sdd_pool =
		dma_pool_create("sdd_pool", dev,
				sizeof(struct dpaa2_qdma_sd_d) * 2,
				sizeof(struct dpaa2_qdma_sd_d), 0);
	if (!dpaa2_chan->sdd_pool)
		goto err_fl;

	return dpaa2_qdma->desc_allocated++;
err_fl:
	dma_pool_destroy(dpaa2_chan->fl_pool);
err_fd:
	dma_pool_destroy(dpaa2_chan->fd_pool);
err:
	return -ENOMEM;
}

static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
	unsigned long flags;

	LIST_HEAD(head);

	spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
	vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
	spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);

	dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_used);
	dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_free);

	dma_pool_destroy(dpaa2_chan->fd_pool);
	dma_pool_destroy(dpaa2_chan->fl_pool);
	dma_pool_destroy(dpaa2_chan->sdd_pool);
	dpaa2_qdma->desc_allocated--;
}

/*
 * Request a command descriptor for enqueue: reuse one from the channel's
 * free list when available, otherwise allocate a new one from the
 * per-channel DMA pools.
 */
static struct dpaa2_qdma_comp *
dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
{
	struct dpaa2_qdma_priv *qdma_priv = dpaa2_chan->qdma->priv;
	struct device *dev = &qdma_priv->dpdmai_dev->dev;
	struct dpaa2_qdma_comp *comp_temp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
	if (list_empty(&dpaa2_chan->comp_free)) {
		spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
		comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT);
		if (!comp_temp)
			goto err;
		comp_temp->fd_virt_addr =
			dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT,
				       &comp_temp->fd_bus_addr);
		if (!comp_temp->fd_virt_addr)
			goto err_comp;

		comp_temp->fl_virt_addr =
			dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT,
				       &comp_temp->fl_bus_addr);
		if (!comp_temp->fl_virt_addr)
			goto err_fd_virt;

		comp_temp->desc_virt_addr =
			dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT,
				       &comp_temp->desc_bus_addr);
		if (!comp_temp->desc_virt_addr)
			goto err_fl_virt;

		comp_temp->qchan = dpaa2_chan;
		return comp_temp;
	}

	comp_temp = list_first_entry(&dpaa2_chan->comp_free,
				     struct dpaa2_qdma_comp, list);
	list_del(&comp_temp->list);
	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);

	comp_temp->qchan = dpaa2_chan;

	return comp_temp;

err_fl_virt:
	dma_pool_free(dpaa2_chan->fl_pool,
		      comp_temp->fl_virt_addr,
		      comp_temp->fl_bus_addr);
err_fd_virt:
	dma_pool_free(dpaa2_chan->fd_pool,
		      comp_temp->fd_virt_addr,
		      comp_temp->fd_bus_addr);
err_comp:
	kfree(comp_temp);
err:
	dev_err(dev, "Failed to request descriptor\n");
	return NULL;
}

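/*
 * Populate the frame descriptor that gets enqueued to hardware: it
 * carries the bus address of the frame list plus format and context
 * flags (format | QDMA_SER_CTX) in the frame context (FRC) field.
 */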
static void
dpaa2_qdma_populate_fd(u32 format, struct dpaa2_qdma_comp *dpaa2_comp)
{
	struct dpaa2_fd *fd;

	fd = dpaa2_comp->fd_virt_addr;
	memset(fd, 0, sizeof(struct dpaa2_fd));

	/* point the FD at the frame list */
	dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);

	/*
	 * Bypass memory translation, frame list format, short length
	 * disable; BMT must be disabled when the fsl-mc bus uses IOVA
	 * addresses.
	 */
	if (smmu_disable)
		dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
	dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);

	dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
}

/* first frame list for descriptor buffer */
static void
dpaa2_qdma_populate_first_framel(struct dpaa2_fl_entry *f_list,
				 struct dpaa2_qdma_comp *dpaa2_comp,
				 bool wrt_changed)
{
	struct dpaa2_qdma_sd_d *sdd;

	sdd = dpaa2_comp->desc_virt_addr;
	memset(sdd, 0, 2 * (sizeof(*sdd)));

	/* source descriptor CMD */
	sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT);
	sdd++;

	/* dest descriptor CMD */
	if (wrt_changed)
		sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
	else
		sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);

	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	/* first frame list to source descriptor */
	dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
	/* 0x20: length of the source + destination descriptor pair */
	dpaa2_fl_set_len(f_list, 0x20);
	dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);

	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}

/* source and destination frame list */
static void
dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
			   dma_addr_t dst, dma_addr_t src,
			   size_t len, uint8_t fmt)
{
	/* source frame list to source buffer */
	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	dpaa2_fl_set_addr(f_list, src);
	dpaa2_fl_set_len(f_list, len);

	/* single buffer frame or scatter gather frame */
	dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));

	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);

	f_list++;

	/* destination frame list to destination buffer */
	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	dpaa2_fl_set_addr(f_list, dst);
	dpaa2_fl_set_len(f_list, len);
	dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
	/* mark the destination entry as the final one in the list */
	dpaa2_fl_set_final(f_list, QDMA_FL_F);
	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}

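/*
 * Prepare a memcpy transaction. Layout of one transfer:
 *
 *	FD --> FL[0] --> source/destination descriptor pair
 *	       FL[1] --> source buffer
 *	       FL[2] --> destination buffer (final entry)
 */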
static struct dma_async_tx_descriptor
*dpaa2_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
			dma_addr_t src, size_t len, ulong flags)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct dpaa2_fl_entry *f_list;
	bool wrt_changed;

	dpaa2_qdma = dpaa2_chan->qdma;
	dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
	if (!dpaa2_comp)
		return NULL;

	wrt_changed = (bool)dpaa2_qdma->qdma_wrtype_fixup;

	/* populate Frame descriptor */
	dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);

	f_list = dpaa2_comp->fl_virt_addr;

	/* first frame list for descriptor buffer (long format) */
	dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);

	f_list++;

	dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);

	return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
}

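/*
 * Push the next pending descriptor to hardware: move it from the vchan
 * queue onto comp_used and enqueue its FD on the channel's request FQ.
 * On enqueue failure the descriptor is recycled onto comp_free.
 */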
static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct virt_dma_desc *vdesc;
	struct dpaa2_fd *fd;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
	spin_lock(&dpaa2_chan->vchan.lock);
	if (vchan_issue_pending(&dpaa2_chan->vchan)) {
		vdesc = vchan_next_desc(&dpaa2_chan->vchan);
		if (!vdesc)
			goto err_enqueue;
		dpaa2_comp = to_fsl_qdma_comp(vdesc);

		fd = dpaa2_comp->fd_virt_addr;

		list_del(&vdesc->node);
		list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);

		err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
		if (err) {
			list_move_tail(&dpaa2_comp->list,
				       &dpaa2_chan->comp_free);
		}
	}
err_enqueue:
	spin_unlock(&dpaa2_chan->vchan.lock);
	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
}

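/*
 * Open the DPDMAI object, verify the API version and cache the rx/tx
 * frame-queue attributes for each priority (queue pair).
 */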
static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = &ls_dev->dev;
	struct dpaa2_qdma_priv *priv;
	u8 prio_def = DPDMAI_PRIO_NUM;
	int err = -EINVAL;
	int i;

	priv = dev_get_drvdata(dev);

	priv->dev = dev;
	priv->dpqdma_id = ls_dev->obj_desc.id;

	/* Get the handle for the DPDMAI this interface is associated with */
	err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpdmai_open() failed\n");
		return err;
	}

	dev_dbg(dev, "Opened dpdmai object successfully\n");

	err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
				    &priv->dpdmai_attr);
	if (err) {
		dev_err(dev, "dpdmai_get_attributes() failed\n");
		goto exit;
	}

	if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
		err = -EINVAL;
		dev_err(dev, "DPDMAI major version mismatch\n"
			     "Found %u.%u, supported version is %u.%u\n",
			priv->dpdmai_attr.version.major,
			priv->dpdmai_attr.version.minor,
			DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
		goto exit;
	}

	if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
		err = -EINVAL;
		dev_err(dev, "DPDMAI minor version mismatch\n"
			     "Found %u.%u, supported version is %u.%u\n",
			priv->dpdmai_attr.version.major,
			priv->dpdmai_attr.version.minor,
			DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
		goto exit;
	}

	priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
	ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
	if (!ppriv) {
		err = -ENOMEM;
		goto exit;
	}
	priv->ppriv = ppriv;

	for (i = 0; i < priv->num_pairs; i++) {
		err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  i, &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpdmai_get_rx_queue() failed\n");
			goto exit;
		}
		ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;

		err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  i, &priv->tx_fqid[i]);
		if (err) {
			dev_err(dev, "dpdmai_get_tx_queue() failed\n");
			goto exit;
		}
		ppriv->req_fqid = priv->tx_fqid[i];
		ppriv->prio = i;
		ppriv->priv = priv;
		ppriv++;
	}

	return 0;
exit:
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	return err;
}

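/*
 * FQDAN callback, run when a response frame queue has frames available:
 * pull frames into the per-priority store, match each completed FD back
 * to its dpaa2_qdma_comp by bus address, complete the cookie and rearm
 * the notification.
 */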
static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
			struct dpaa2_qdma_priv_per_prio, nctx);
	struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
	struct dpaa2_qdma_priv *priv = ppriv->priv;
	u32 n_chans = priv->dpaa2_qdma->n_chans;
	struct dpaa2_qdma_chan *qchan;
	const struct dpaa2_fd *fd_eq;
	const struct dpaa2_fd *fd;
	struct dpaa2_dq *dq;
	int is_last = 0;
	int found;
	u8 status;
	int err;
	int i;

	do {
		err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
					       ppriv->store);
	} while (err);

	while (!is_last) {
		do {
			dq = dpaa2_io_store_next(ppriv->store, &is_last);
		} while (!is_last && !dq);
		if (!dq) {
			dev_err(priv->dev, "FQID returned no valid frames!\n");
			continue;
		}

		/* obtain FD and process the error */
		fd = dpaa2_dq_fd(dq);

		status = dpaa2_fd_get_ctrl(fd) & 0xff;
		if (status)
			dev_err(priv->dev, "FD error occurred\n");
		found = 0;
		for (i = 0; i < n_chans; i++) {
			qchan = &priv->dpaa2_qdma->chans[i];
			spin_lock(&qchan->queue_lock);
			if (list_empty(&qchan->comp_used)) {
				spin_unlock(&qchan->queue_lock);
				continue;
			}
			list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
						 &qchan->comp_used, list) {
				fd_eq = dpaa2_comp->fd_virt_addr;

				if (le64_to_cpu(fd_eq->simple.addr) ==
				    le64_to_cpu(fd->simple.addr)) {
					spin_lock(&qchan->vchan.lock);
					vchan_cookie_complete(&dpaa2_comp->vdesc);
					spin_unlock(&qchan->vchan.lock);
					found = 1;
					break;
				}
			}
			spin_unlock(&qchan->queue_lock);
			if (found)
				break;
		}
	}

	dpaa2_io_service_rearm(NULL, ctx);
}

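/*
 * Register one notification context and dequeue store per priority with
 * the DPIO service; on failure, unwind the pairs set up so far.
 */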
static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = priv->dev;
	int err = -EINVAL;
	int i, num;

	num = priv->num_pairs;
	ppriv = priv->ppriv;
	for (i = 0; i < num; i++) {
		ppriv->nctx.is_cdan = 0;
		ppriv->nctx.desired_cpu = DPAA2_IO_ANY_CPU;
		ppriv->nctx.id = ppriv->rsp_fqid;
		ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
		err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
		if (err) {
			dev_err(dev, "Notification register failed\n");
			goto err_service;
		}

		ppriv->store =
			dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
		if (!ppriv->store) {
			err = -ENOMEM;
			dev_err(dev, "dpaa2_io_store_create() failed\n");
			goto err_store;
		}

		ppriv++;
	}
	return 0;

err_store:
	dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
err_service:
	ppriv--;
	while (ppriv >= priv->ppriv) {
		dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
		dpaa2_io_store_destroy(ppriv->store);
		ppriv--;
	}
	return err;
}

static void dpaa2_dpdmai_store_free(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		dpaa2_io_store_destroy(ppriv->store);
		ppriv++;
	}
}

static void dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	struct device *dev = priv->dev;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
		ppriv++;
	}
}

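/*
 * Point each DPDMAI rx (response) queue at its DPIO so that frame
 * availability raises an FQDAN notification carrying our context.
 */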
static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
{
	struct dpdmai_rx_queue_cfg rx_queue_cfg;
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev;
	int i, num;
	int err;

	ls_dev = to_fsl_mc_device(dev);
	num = priv->num_pairs;
	ppriv = priv->ppriv;
	for (i = 0; i < num; i++) {
		rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
				       DPDMAI_QUEUE_OPT_DEST;
		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
		rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
		rx_queue_cfg.dest_cfg.priority = ppriv->prio;
		err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  rx_queue_cfg.dest_cfg.priority,
					  &rx_queue_cfg);
		if (err) {
			dev_err(dev, "dpdmai_set_rx_queue() failed\n");
			return err;
		}

		ppriv++;
	}

	return 0;
}

static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev;
	int err = 0;
	int i;

	ls_dev = to_fsl_mc_device(dev);

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv->nctx.qman64 = 0;
		ppriv->nctx.dpio_id = 0;
		ppriv++;
	}

	err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
	if (err)
		dev_err(dev, "dpdmai_reset() failed\n");

	return err;
}

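/* Return all completion descriptors on @head to the channel's DMA pools. */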
static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
				   struct list_head *head)
{
	struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
	unsigned long flags;

	list_for_each_entry_safe(comp_tmp, _comp_tmp,
				 head, list) {
		spin_lock_irqsave(&qchan->queue_lock, flags);
		list_del(&comp_tmp->list);
		spin_unlock_irqrestore(&qchan->queue_lock, flags);
		dma_pool_free(qchan->fd_pool,
			      comp_tmp->fd_virt_addr,
			      comp_tmp->fd_bus_addr);
		dma_pool_free(qchan->fl_pool,
			      comp_tmp->fl_virt_addr,
			      comp_tmp->fl_bus_addr);
		dma_pool_free(qchan->sdd_pool,
			      comp_tmp->desc_virt_addr,
			      comp_tmp->desc_bus_addr);
		kfree(comp_tmp);
	}
}

static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
	struct dpaa2_qdma_chan *qchan;
	int num, i;

	num = dpaa2_qdma->n_chans;
	for (i = 0; i < num; i++) {
		qchan = &dpaa2_qdma->chans[i];
		dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
		dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
		dma_pool_destroy(qchan->fd_pool);
		dma_pool_destroy(qchan->fl_pool);
		dma_pool_destroy(qchan->sdd_pool);
	}
}

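/*
 * vchan desc_free callback: recycle the completion descriptor onto the
 * channel's free list rather than freeing its pool memory.
 */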
static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct dpaa2_qdma_chan *qchan;
	unsigned long flags;

	dpaa2_comp = to_fsl_qdma_comp(vdesc);
	qchan = dpaa2_comp->qchan;
	spin_lock_irqsave(&qchan->queue_lock, flags);
	list_move_tail(&dpaa2_comp->list, &qchan->comp_free);
	spin_unlock_irqrestore(&qchan->queue_lock, flags);
}

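/*
 * Initialize the virtual DMA channels; transmit FQIDs are shared
 * round-robin across channels when there are more channels than
 * queue pairs.
 */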
static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
	struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
	struct dpaa2_qdma_chan *dpaa2_chan;
	int num = priv->num_pairs;
	int i;

	INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
	for (i = 0; i < dpaa2_qdma->n_chans; i++) {
		dpaa2_chan = &dpaa2_qdma->chans[i];
		dpaa2_chan->qdma = dpaa2_qdma;
		dpaa2_chan->fqid = priv->tx_fqid[i % num];
		dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
		vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
		spin_lock_init(&dpaa2_chan->queue_lock);
		INIT_LIST_HEAD(&dpaa2_chan->comp_used);
		INIT_LIST_HEAD(&dpaa2_chan->comp_free);
	}
	return 0;
}

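/*
 * Probe: allocate an MC portal, set up the DPDMAI object and its DPIO
 * notification plumbing, enable the DPDMAI, then register the channels
 * with the dmaengine core.
 */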
static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
{
	struct device *dev = &dpdmai_dev->dev;
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_priv *priv;
	int err;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	dev_set_drvdata(dev, priv);
	priv->dpdmai_dev = dpdmai_dev;

	priv->iommu_domain = iommu_get_domain_for_dev(dev);
	if (priv->iommu_domain)
		smmu_disable = false;

	/* obtain an MC portal */
	err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");
		goto err_mcportal;
	}

	/* DPDMAI initialization */
	err = dpaa2_qdma_setup(dpdmai_dev);
	if (err) {
		dev_err(dev, "dpaa2_qdma_setup() failed\n");
		goto err_dpdmai_setup;
	}

	/* DPIO */
	err = dpaa2_qdma_dpio_setup(priv);
	if (err) {
		dev_err(dev, "dpaa2_qdma_dpio_setup() failed\n");
		goto err_dpio_setup;
	}

	/* DPDMAI binding to DPIO */
	err = dpaa2_dpdmai_bind(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
		goto err_bind;
	}

	/* DPDMAI enable */
	err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpdmai_enable() failed\n");
		goto err_enable;
	}

	dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
	if (!dpaa2_qdma) {
		err = -ENOMEM;
		goto err_eng;
	}

	priv->dpaa2_qdma = dpaa2_qdma;
	dpaa2_qdma->priv = priv;

	dpaa2_qdma->desc_allocated = 0;
	dpaa2_qdma->n_chans = NUM_CH;

	dpaa2_dpdmai_init_channels(dpaa2_qdma);

	if (soc_device_match(soc_fixup_tuning))
		dpaa2_qdma->qdma_wrtype_fixup = true;
	else
		dpaa2_qdma->qdma_wrtype_fixup = false;

	dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);

	dpaa2_qdma->dma_dev.dev = dev;
	dpaa2_qdma->dma_dev.device_alloc_chan_resources =
		dpaa2_qdma_alloc_chan_resources;
	dpaa2_qdma->dma_dev.device_free_chan_resources =
		dpaa2_qdma_free_chan_resources;
	dpaa2_qdma->dma_dev.device_tx_status = dma_cookie_status;
	dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
	dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;

	err = dma_async_device_register(&dpaa2_qdma->dma_dev);
	if (err) {
		dev_err(dev, "Can't register NXP QDMA engine.\n");
		goto err_dpaa2_qdma;
	}

	return 0;

err_dpaa2_qdma:
	kfree(dpaa2_qdma);
err_eng:
	dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
err_enable:
	dpaa2_dpdmai_dpio_unbind(priv);
err_bind:
	dpaa2_dpdmai_store_free(priv);
	dpaa2_dpdmai_dpio_free(priv);
err_dpio_setup:
	kfree(priv->ppriv);
	dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
err_dpdmai_setup:
	fsl_mc_portal_free(priv->mc_io);
err_mcportal:
	kfree(priv);
	dev_set_drvdata(dev, NULL);
	return err;
}

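/* Tear down in roughly the reverse order of probe. */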
static void dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_priv *priv;
	struct device *dev;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);
	dpaa2_qdma = priv->dpaa2_qdma;

	dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
	dpaa2_dpdmai_dpio_unbind(priv);
	dpaa2_dpdmai_store_free(priv);
	dpaa2_dpdmai_dpio_free(priv);
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	fsl_mc_portal_free(priv->mc_io);
	dev_set_drvdata(dev, NULL);
	dpaa2_dpdmai_free_channels(dpaa2_qdma);

	dma_async_device_unregister(&dpaa2_qdma->dma_dev);
	kfree(priv);
	kfree(dpaa2_qdma);
}

static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_priv *priv;
	struct device *dev;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);

	dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
	dpaa2_dpdmai_dpio_unbind(priv);
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle);
}

static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpdmai",
	},
	{ .vendor = 0x0 }
};

static struct fsl_mc_driver dpaa2_qdma_driver = {
	.driver		= {
		.name	= "dpaa2-qdma",
		.owner  = THIS_MODULE,
	},
	.probe          = dpaa2_qdma_probe,
	.remove		= dpaa2_qdma_remove,
	.shutdown	= dpaa2_qdma_shutdown,
	.match_id_table	= dpaa2_qdma_id_table
};

static int __init dpaa2_qdma_driver_init(void)
{
	return fsl_mc_driver_register(&dpaa2_qdma_driver);
}
late_initcall(dpaa2_qdma_driver_init);

static void __exit fsl_qdma_exit(void)
{
	fsl_mc_driver_unregister(&dpaa2_qdma_driver);
}
module_exit(fsl_qdma_exit);

MODULE_ALIAS("platform:fsl-dpaa2-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape DPAA2 qDMA engine driver");