/*
 * The driver for Freescale MPC512x LocalPlus Bus FIFO
 * (called SCLPC in the Reference Manual).
 *
 * Copyright (C) 2013-2015 Alexander Popov <alex.popov@linux.com>.
 *
 * This file is released under the GPLv2.
 */

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/mpc5121.h>

#define DRV_NAME "mpc512x_lpbfifo"

struct cs_range {
	u32 csnum;
	u32 base; /* must be zero */
	u32 addr;
	u32 size;
};

static struct lpbfifo_data {
	spinlock_t lock; /* for protecting lpbfifo_data */
	phys_addr_t regs_phys;
	resource_size_t regs_size;
	struct mpc512x_lpbfifo __iomem *regs;
	int irq;
	struct cs_range *cs_ranges;
	size_t cs_n;
	struct dma_chan *chan;
	struct mpc512x_lpbfifo_request *req;
	dma_addr_t ram_bus_addr;
	bool wait_lpbfifo_irq;
	bool wait_lpbfifo_callback;
} lpbfifo;

/*
 * A data transfer from RAM to a device on the LPB is finished
 * when both mpc512x_lpbfifo_irq() and mpc512x_lpbfifo_callback()
 * have been called. We execute the callback registered in
 * mpc512x_lpbfifo_request just after that.
 * But for a data transfer from a device on the LPB to RAM we don't enable
 * the LPBFIFO interrupt, because clearing the MPC512X_SCLPC_SUCCESS interrupt
 * flag automatically disables the LPBFIFO read request to the DMA controller
 * and the data transfer hangs. So the callback registered in
 * mpc512x_lpbfifo_request is executed at the end of mpc512x_lpbfifo_callback().
 */

/*
 * mpc512x_lpbfifo_irq - IRQ handler for LPB FIFO
 */
static irqreturn_t mpc512x_lpbfifo_irq(int irq, void *param)
{
	struct device *dev = param;
	struct mpc512x_lpbfifo_request *req = NULL;
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&lpbfifo.lock, flags);

	if (!lpbfifo.regs)
		goto end;

	req = lpbfifo.req;
	if (!req || req->dir == MPC512X_LPBFIFO_REQ_DIR_READ) {
		dev_err(dev, "bogus LPBFIFO IRQ\n");
		goto end;
	}

	status = in_be32(&lpbfifo.regs->status);
	if (status != MPC512X_SCLPC_SUCCESS) {
		dev_err(dev, "DMA transfer from RAM to peripheral failed\n");
		out_be32(&lpbfifo.regs->enable,
				MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);
		goto end;
	}
	/* Clear the interrupt flag */
	out_be32(&lpbfifo.regs->status, MPC512X_SCLPC_SUCCESS);

	lpbfifo.wait_lpbfifo_irq = false;

	if (lpbfifo.wait_lpbfifo_callback)
		goto end;

	/* Transfer is finished, set the FIFO as idle */
	lpbfifo.req = NULL;

	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	if (req->callback)
		req->callback(req);

	return IRQ_HANDLED;

 end:
	spin_unlock_irqrestore(&lpbfifo.lock, flags);
	return IRQ_HANDLED;
}

/*
 * mpc512x_lpbfifo_callback is called by the DMA driver when
 * the DMA transaction is finished.
 */
static void mpc512x_lpbfifo_callback(void *param)
{
	unsigned long flags;
	struct mpc512x_lpbfifo_request *req = NULL;
	enum dma_data_direction dir;

	spin_lock_irqsave(&lpbfifo.lock, flags);

	if (!lpbfifo.regs) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return;
	}

	req = lpbfifo.req;
	if (!req) {
		pr_err("bogus LPBFIFO callback\n");
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return;
	}

	/* Release the mapping */
	if (req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE)
		dir = DMA_TO_DEVICE;
	else
		dir = DMA_FROM_DEVICE;
	dma_unmap_single(lpbfifo.chan->device->dev,
			lpbfifo.ram_bus_addr, req->size, dir);

	lpbfifo.wait_lpbfifo_callback = false;

	if (!lpbfifo.wait_lpbfifo_irq) {
		/* Transfer is finished, set the FIFO as idle */
		lpbfifo.req = NULL;

		spin_unlock_irqrestore(&lpbfifo.lock, flags);

		if (req->callback)
			req->callback(req);
	} else {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
	}
}

static int mpc512x_lpbfifo_kick(void)
{
	u32 bits;
	bool no_incr = false;
	u32 bpt = 32; /* max bytes per LPBFIFO transaction involving DMA */
	u32 cs = 0;
	size_t i;
	struct dma_device *dma_dev = NULL;
	struct scatterlist sg;
	enum dma_data_direction dir;
	struct dma_slave_config dma_conf = {};
	struct dma_async_tx_descriptor *dma_tx = NULL;
	dma_cookie_t cookie;
	int ret;

	/*
	 * 1. Fit the requirements:
	 * - the packet size must be a multiple of 4 since the FIFO Data Word
	 *    Register allows only full-word access according to the Reference
	 *    Manual;
	 * - the physical address of the device on the LPB and the packet size
	 *    must be aligned on a BPT (bytes per transaction) or 8-byte
	 *    boundary according to the Reference Manual;
	 * - but we choose a DMA maxburst equal to (or very close to) BPT to
	 *    prevent the DMA controller from overtaking the FIFO and causing a
	 *    FIFO underflow error. So we force the packet size to be aligned on
	 *    a BPT boundary so as not to confuse the DMA driver, which requires
	 *    the packet size to be aligned on a maxburst boundary;
	 * - BPT should be set to the LPB device port size for operation with
	 *    auto-increment disabled, according to the Reference Manual.
	 */
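	/*
	 * Worked example (derived from the code below, not from the Reference
	 * Manual): a request with portsize 2 forces bpt = 2 and disables
	 * auto-increment, so both dev_phys_addr and size must be 2-byte
	 * aligned, and the resulting DMA maxburst is max(2, 4) / 4 = 1 word.
	 * A request with an undefined portsize starts from bpt = 32, which is
	 * halved until dev_phys_addr is aligned on min(bpt, 8) and size is
	 * aligned on bpt, giving a maxburst of bpt / 4 words.
	 */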
	if (lpbfifo.req->size == 0 || !IS_ALIGNED(lpbfifo.req->size, 4))
		return -EINVAL;

	if (lpbfifo.req->portsize != LPB_DEV_PORTSIZE_UNDEFINED) {
		bpt = lpbfifo.req->portsize;
		no_incr = true;
	}

	while (bpt > 1) {
		if (IS_ALIGNED(lpbfifo.req->dev_phys_addr, min(bpt, 0x8u)) &&
					IS_ALIGNED(lpbfifo.req->size, bpt)) {
			break;
		}

		if (no_incr)
			return -EINVAL;

		bpt >>= 1;
	}
	dma_conf.dst_maxburst = max(bpt, 0x4u) / 4;
	dma_conf.src_maxburst = max(bpt, 0x4u) / 4;

	for (i = 0; i < lpbfifo.cs_n; i++) {
		phys_addr_t cs_start = lpbfifo.cs_ranges[i].addr;
		phys_addr_t cs_end = cs_start + lpbfifo.cs_ranges[i].size;
		phys_addr_t access_start = lpbfifo.req->dev_phys_addr;
		phys_addr_t access_end = access_start + lpbfifo.req->size;

		if (access_start >= cs_start && access_end <= cs_end) {
			cs = lpbfifo.cs_ranges[i].csnum;
			break;
		}
	}
	if (i == lpbfifo.cs_n)
		return -EFAULT;

	/* 2. Prepare DMA */
	dma_dev = lpbfifo.chan->device;

	if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE) {
		dir = DMA_TO_DEVICE;
		dma_conf.direction = DMA_MEM_TO_DEV;
		dma_conf.dst_addr = lpbfifo.regs_phys +
				offsetof(struct mpc512x_lpbfifo, data_word);
	} else {
		dir = DMA_FROM_DEVICE;
		dma_conf.direction = DMA_DEV_TO_MEM;
		dma_conf.src_addr = lpbfifo.regs_phys +
				offsetof(struct mpc512x_lpbfifo, data_word);
	}
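	/*
	 * The FIFO data word register allows only full-word (32-bit) access,
	 * as noted above, hence the 4-byte DMA bus width in both directions.
	 */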
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Make DMA channel work with LPB FIFO data register */
	if (dma_dev->device_config(lpbfifo.chan, &dma_conf))
		return -EINVAL; /* nothing is mapped yet, so no unmapping here */

	sg_init_table(&sg, 1);

	sg_dma_address(&sg) = dma_map_single(dma_dev->dev,
			lpbfifo.req->ram_virt_addr, lpbfifo.req->size, dir);
	if (dma_mapping_error(dma_dev->dev, sg_dma_address(&sg)))
		return -EFAULT;

	lpbfifo.ram_bus_addr = sg_dma_address(&sg); /* For freeing later */

	sg_dma_len(&sg) = lpbfifo.req->size;

	dma_tx = dmaengine_prep_slave_sg(lpbfifo.chan, &sg,
						1, dma_conf.direction, 0);
	if (!dma_tx) {
		ret = -ENOSPC;
		goto err_dma_prep;
	}
	dma_tx->callback = mpc512x_lpbfifo_callback;
	dma_tx->callback_param = NULL;

	/* 3. Prepare FIFO */
	out_be32(&lpbfifo.regs->enable,
				MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);
	out_be32(&lpbfifo.regs->enable, 0x0);

	/*
	 * Configure the watermarks for a write operation (RAM->DMA->FIFO->dev):
	 * - high watermark 7 words according to the Reference Manual,
	 * - low watermark 512 bytes (half of the FIFO).
	 * These watermarks don't apply to read operations since the
	 * MPC512X_SCLPC_FLUSH bit is set (according to the Reference Manual).
	 */
	out_be32(&lpbfifo.regs->fifo_ctrl, MPC512X_SCLPC_FIFO_CTRL(0x7));
	out_be32(&lpbfifo.regs->fifo_alarm, MPC512X_SCLPC_FIFO_ALARM(0x200));

	/*
	 * The start address is the physical address of the region which
	 * belongs to the device on the LocalPlus Bus
	 */
	out_be32(&lpbfifo.regs->start_addr, lpbfifo.req->dev_phys_addr);

	/*
	 * Configure the chip select, the transfer direction, the address
	 * increment option and the bytes per transaction option
	 */
	bits = MPC512X_SCLPC_CS(cs);
	if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_READ)
		bits |= MPC512X_SCLPC_READ | MPC512X_SCLPC_FLUSH;
	if (no_incr)
		bits |= MPC512X_SCLPC_DAI;
	bits |= MPC512X_SCLPC_BPT(bpt);
	out_be32(&lpbfifo.regs->ctrl, bits);

	/* Unmask irqs */
	bits = MPC512X_SCLPC_ENABLE | MPC512X_SCLPC_ABORT_INT_ENABLE;
	if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE)
		bits |= MPC512X_SCLPC_NORM_INT_ENABLE;
	else
		lpbfifo.wait_lpbfifo_irq = false;

	out_be32(&lpbfifo.regs->enable, bits);

	/* 4. Set the packet size and kick the FIFO off */
	bits = lpbfifo.req->size | MPC512X_SCLPC_START;
	out_be32(&lpbfifo.regs->pkt_size, bits);

	/* 5. Finally kick the DMA off */
	cookie = dma_tx->tx_submit(dma_tx);
	if (dma_submit_error(cookie)) {
		ret = -ENOSPC;
		goto err_dma_submit;
	}

	return 0;

 err_dma_submit:
	out_be32(&lpbfifo.regs->enable,
				MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);
 err_dma_prep:
	dma_unmap_single(dma_dev->dev, sg_dma_address(&sg),
						lpbfifo.req->size, dir);
	return ret;
}

332 
333 static int mpc512x_lpbfifo_submit_locked(struct mpc512x_lpbfifo_request *req)
334 {
335 	int ret = 0;
336 
337 	if (!lpbfifo.regs)
338 		return -ENODEV;
339 
340 	/* Check whether a transfer is in progress */
341 	if (lpbfifo.req)
342 		return -EBUSY;
343 
344 	lpbfifo.wait_lpbfifo_irq = true;
345 	lpbfifo.wait_lpbfifo_callback = true;
346 	lpbfifo.req = req;
347 
348 	ret = mpc512x_lpbfifo_kick();
349 	if (ret != 0)
350 		lpbfifo.req = NULL; /* Set the FIFO as idle */
351 
352 	return ret;
353 }
354 
355 int mpc512x_lpbfifo_submit(struct mpc512x_lpbfifo_request *req)
356 {
357 	unsigned long flags;
358 	int ret = 0;
359 
360 	spin_lock_irqsave(&lpbfifo.lock, flags);
361 	ret = mpc512x_lpbfifo_submit_locked(req);
362 	spin_unlock_irqrestore(&lpbfifo.lock, flags);
363 
364 	return ret;
365 }
366 EXPORT_SYMBOL(mpc512x_lpbfifo_submit);
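
/*
 * A minimal usage sketch for a client of this driver. It is illustrative
 * only: write_to_lpb_dev() and my_callback() are hypothetical and not part
 * of this driver; only the mpc512x_lpbfifo_request fields and
 * mpc512x_lpbfifo_submit() belong to the real interface.
 *
 *	static struct mpc512x_lpbfifo_request req;
 *
 *	static void my_callback(struct mpc512x_lpbfifo_request *r)
 *	{
 *		pr_info("LPB transfer finished\n");
 *	}
 *
 *	static int write_to_lpb_dev(void *buf, u32 size, phys_addr_t dev_addr)
 *	{
 *		req.dev_phys_addr = dev_addr;
 *		req.ram_virt_addr = buf;
 *		req.size = size;
 *		req.portsize = LPB_DEV_PORTSIZE_UNDEFINED;
 *		req.dir = MPC512X_LPBFIFO_REQ_DIR_WRITE;
 *		req.callback = my_callback;
 *
 *		return mpc512x_lpbfifo_submit(&req);
 *	}
 *
 * The request must stay valid until the callback runs, and the callback may
 * be invoked from interrupt context.
 */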

/*
 * The LPBFIFO driver uses the "ranges" property of the "localbus" device
 * tree node to determine the chip select number of a client device
 * requesting a DMA transfer.
 */
static int get_cs_ranges(struct device *dev)
{
	int ret = -ENODEV;
	struct device_node *lb_node;
	const u32 *addr_cells_p;
	const u32 *size_cells_p;
	int proplen;
	size_t i;

	lb_node = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-localbus");
	if (!lb_node)
		return ret;

	/*
	 * The node defined as compatible with 'fsl,mpc5121-localbus'
	 * should have two address cells and one size cell.
	 * Every item of its "ranges" property should consist of:
	 * - the first address cell, which is the chipselect number;
	 * - the second address cell, which is the offset in the chipselect
	 *    and must be zero;
	 * - the CPU address of the beginning of an access window;
	 * - the only size cell, which is the size of an access window.
	 */
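	/*
	 * An illustrative layout (the addresses and sizes are made up, not
	 * taken from a real board):
	 *
	 *	localbus {
	 *		compatible = "fsl,mpc5121-localbus";
	 *		#address-cells = <2>;
	 *		#size-cells = <1>;
	 *		ranges = <0x0 0x0 0xfc000000 0x04000000
	 *			  0x1 0x0 0xe0000000 0x00010000>;
	 *	};
	 *
	 * Here chip select 0 covers a 64 MiB window at 0xfc000000 and
	 * chip select 1 covers a 64 KiB window at 0xe0000000.
	 */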
	addr_cells_p = of_get_property(lb_node, "#address-cells", NULL);
	size_cells_p = of_get_property(lb_node, "#size-cells", NULL);
	if (addr_cells_p == NULL || *addr_cells_p != 2 ||
				size_cells_p == NULL || *size_cells_p != 1) {
		goto end;
	}

	proplen = of_property_count_u32_elems(lb_node, "ranges");
	if (proplen <= 0 || proplen % 4 != 0)
		goto end;

	lpbfifo.cs_n = proplen / 4;
	lpbfifo.cs_ranges = devm_kcalloc(dev, lpbfifo.cs_n,
					sizeof(struct cs_range), GFP_KERNEL);
	if (!lpbfifo.cs_ranges)
		goto end;

	if (of_property_read_u32_array(lb_node, "ranges",
				(u32 *)lpbfifo.cs_ranges, proplen) != 0) {
		goto end;
	}

	for (i = 0; i < lpbfifo.cs_n; i++) {
		if (lpbfifo.cs_ranges[i].base != 0)
			goto end;
	}

	ret = 0;

 end:
	of_node_put(lb_node);
	return ret;
}

static int mpc512x_lpbfifo_probe(struct platform_device *pdev)
{
	struct resource r;
	int ret = 0;

	memset(&lpbfifo, 0, sizeof(struct lpbfifo_data));
	spin_lock_init(&lpbfifo.lock);

	lpbfifo.chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
	if (lpbfifo.chan == NULL)
		return -EPROBE_DEFER;

	if (of_address_to_resource(pdev->dev.of_node, 0, &r) != 0) {
		dev_err(&pdev->dev, "bad 'reg' in 'sclpc' device tree node\n");
		ret = -ENODEV;
		goto err0;
	}

	lpbfifo.regs_phys = r.start;
	lpbfifo.regs_size = resource_size(&r);

	if (!devm_request_mem_region(&pdev->dev, lpbfifo.regs_phys,
					lpbfifo.regs_size, DRV_NAME)) {
		dev_err(&pdev->dev, "unable to request region\n");
		ret = -EBUSY;
		goto err0;
	}

	lpbfifo.regs = devm_ioremap(&pdev->dev,
					lpbfifo.regs_phys, lpbfifo.regs_size);
	if (!lpbfifo.regs) {
		dev_err(&pdev->dev, "mapping registers failed\n");
		ret = -ENOMEM;
		goto err0;
	}

	out_be32(&lpbfifo.regs->enable,
				MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);

	if (get_cs_ranges(&pdev->dev) != 0) {
		dev_err(&pdev->dev, "bad '/localbus' device tree node\n");
		ret = -ENODEV;
		goto err0;
	}

	lpbfifo.irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (lpbfifo.irq == NO_IRQ) {
		dev_err(&pdev->dev, "mapping irq failed\n");
		ret = -ENODEV;
		goto err0;
	}

	if (request_irq(lpbfifo.irq, mpc512x_lpbfifo_irq, 0,
						DRV_NAME, &pdev->dev) != 0) {
		dev_err(&pdev->dev, "requesting irq failed\n");
		ret = -ENODEV;
		goto err1;
	}

	dev_info(&pdev->dev, "probe succeeded\n");
	return 0;

 err1:
	irq_dispose_mapping(lpbfifo.irq);
 err0:
	dma_release_channel(lpbfifo.chan);
	return ret;
}

static int mpc512x_lpbfifo_remove(struct platform_device *pdev)
{
	unsigned long flags;
	struct dma_device *dma_dev = lpbfifo.chan->device;
	struct mpc512x_lpbfifo __iomem *regs = NULL;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	regs = lpbfifo.regs;
	lpbfifo.regs = NULL;
	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	dma_dev->device_terminate_all(lpbfifo.chan);
	out_be32(&regs->enable, MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);

	free_irq(lpbfifo.irq, &pdev->dev);
	irq_dispose_mapping(lpbfifo.irq);
	dma_release_channel(lpbfifo.chan);

	return 0;
}

static const struct of_device_id mpc512x_lpbfifo_match[] = {
	{ .compatible = "fsl,mpc512x-lpbfifo", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc512x_lpbfifo_match);

static struct platform_driver mpc512x_lpbfifo_driver = {
	.probe = mpc512x_lpbfifo_probe,
	.remove = mpc512x_lpbfifo_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = mpc512x_lpbfifo_match,
	},
};

module_platform_driver(mpc512x_lpbfifo_driver);

MODULE_AUTHOR("Alexander Popov <alex.popov@linux.com>");
MODULE_DESCRIPTION("MPC512x LocalPlus Bus FIFO device driver");
MODULE_LICENSE("GPL v2");