xref: /openbmc/linux/drivers/spi/spi-fsl-dspi.c (revision 4d75f5c664195b970e1cd2fd25b65b5eff257a0a)
1 // SPDX-License-Identifier: GPL-2.0+
2 //
3 // Copyright 2013 Freescale Semiconductor, Inc.
4 // Copyright 2020-2025 NXP
5 //
6 // Freescale DSPI driver
7 // This file contains a driver for the Freescale DSPI
8 
9 #include <linux/clk.h>
10 #include <linux/delay.h>
11 #include <linux/dmaengine.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/of.h>
17 #include <linux/platform_device.h>
18 #include <linux/pinctrl/consumer.h>
19 #include <linux/regmap.h>
20 #include <linux/spi/spi.h>
21 #include <linux/spi/spi-fsl-dspi.h>
22 
23 #define DRIVER_NAME			"fsl-dspi"
24 
25 #define SPI_MCR				0x00
26 #define SPI_MCR_HOST			BIT(31)
27 #define SPI_MCR_PCSIS(x)		((x) << 16)
28 #define SPI_MCR_CLR_TXF			BIT(11)
29 #define SPI_MCR_CLR_RXF			BIT(10)
30 #define SPI_MCR_XSPI			BIT(3)
31 #define SPI_MCR_DIS_TXF			BIT(13)
32 #define SPI_MCR_DIS_RXF			BIT(12)
33 #define SPI_MCR_HALT			BIT(0)
34 
35 #define SPI_TCR				0x08
36 #define SPI_TCR_GET_TCNT(x)		(((x) & GENMASK(31, 16)) >> 16)
37 
38 #define SPI_CTAR(x)			(0x0c + (((x) & GENMASK(1, 0)) * 4))
39 #define SPI_CTAR_FMSZ(x)		(((x) << 27) & GENMASK(30, 27))
40 #define SPI_CTAR_CPOL			BIT(26)
41 #define SPI_CTAR_CPHA			BIT(25)
42 #define SPI_CTAR_LSBFE			BIT(24)
43 #define SPI_CTAR_PCSSCK(x)		(((x) << 22) & GENMASK(23, 22))
44 #define SPI_CTAR_PASC(x)		(((x) << 20) & GENMASK(21, 20))
45 #define SPI_CTAR_PDT(x)			(((x) << 18) & GENMASK(19, 18))
46 #define SPI_CTAR_PBR(x)			(((x) << 16) & GENMASK(17, 16))
47 #define SPI_CTAR_CSSCK(x)		(((x) << 12) & GENMASK(15, 12))
48 #define SPI_CTAR_ASC(x)			(((x) << 8) & GENMASK(11, 8))
49 #define SPI_CTAR_DT(x)			(((x) << 4) & GENMASK(7, 4))
50 #define SPI_CTAR_BR(x)			((x) & GENMASK(3, 0))
51 #define SPI_CTAR_SCALE_BITS		0xf
52 
53 #define SPI_CTAR0_SLAVE			0x0c
54 
55 #define SPI_SR				0x2c
56 #define SPI_SR_TCFQF			BIT(31)
57 #define SPI_SR_TFUF			BIT(27)
58 #define SPI_SR_TFFF			BIT(25)
59 #define SPI_SR_CMDTCF			BIT(23)
60 #define SPI_SR_SPEF			BIT(21)
61 #define SPI_SR_RFOF			BIT(19)
62 #define SPI_SR_TFIWF			BIT(18)
63 #define SPI_SR_RFDF			BIT(17)
64 #define SPI_SR_CMDFFF			BIT(16)
65 #define SPI_SR_TXRXS			BIT(30)
66 #define SPI_SR_CLEAR			(SPI_SR_TCFQF | \
67 					SPI_SR_TFUF | SPI_SR_TFFF | \
68 					SPI_SR_CMDTCF | SPI_SR_SPEF | \
69 					SPI_SR_RFOF | SPI_SR_TFIWF | \
70 					SPI_SR_RFDF | SPI_SR_CMDFFF)
71 
72 #define SPI_RSER_TFFFE			BIT(25)
73 #define SPI_RSER_TFFFD			BIT(24)
74 #define SPI_RSER_RFDFE			BIT(17)
75 #define SPI_RSER_RFDFD			BIT(16)
76 
77 #define SPI_RSER			0x30
78 #define SPI_RSER_TCFQE			BIT(31)
79 #define SPI_RSER_CMDTCFE		BIT(23)
80 
81 #define SPI_PUSHR			0x34
82 #define SPI_PUSHR_CMD_CONT		BIT(15)
83 #define SPI_PUSHR_CMD_CTAS(x)		(((x) << 12 & GENMASK(14, 12)))
84 #define SPI_PUSHR_CMD_EOQ		BIT(11)
85 #define SPI_PUSHR_CMD_CTCNT		BIT(10)
86 #define SPI_PUSHR_CMD_PCS(x)		(BIT(x) & GENMASK(5, 0))
87 
88 #define SPI_PUSHR_SLAVE			0x34
89 
90 #define SPI_POPR			0x38
91 
92 #define SPI_TXFR0			0x3c
93 #define SPI_TXFR1			0x40
94 #define SPI_TXFR2			0x44
95 #define SPI_TXFR3			0x48
96 #define SPI_RXFR0			0x7c
97 #define SPI_RXFR1			0x80
98 #define SPI_RXFR2			0x84
99 #define SPI_RXFR3			0x88
100 
101 #define SPI_CTARE(x)			(0x11c + (((x) & GENMASK(1, 0)) * 4))
102 #define SPI_CTARE_FMSZE(x)		(((x) & 0x1) << 16)
103 #define SPI_CTARE_DTCP(x)		((x) & 0x7ff)
104 
105 #define SPI_SREX			0x13c
106 
107 #define SPI_FRAME_BITS(bits)		SPI_CTAR_FMSZ((bits) - 1)
108 #define SPI_FRAME_EBITS(bits)		SPI_CTARE_FMSZE(((bits) - 1) >> 4)
109 
110 #define DMA_COMPLETION_TIMEOUT		msecs_to_jiffies(3000)
111 
/* Per-SPI-device (per chip select) state, built by dspi_setup() */
struct chip_data {
	u32			ctar_val;	/* CTAR image: mode, polarity, delays, baud fields */
};
115 
/* Transfer strategies implemented by this driver */
enum dspi_trans_mode {
	DSPI_XSPI_MODE,		/* PIO through the extended (XSPI) CMD/TX FIFO registers */
	DSPI_DMA_MODE,		/* bounce-buffer transfers through the dmaengine API */
};
120 
/* Static per-SoC configuration, one instance per entry of devtype_data[] */
struct fsl_dspi_devtype_data {
	enum dspi_trans_mode	trans_mode;		/* preferred transfer strategy */
	u8			max_clock_factor;	/* NOTE(review): appears to bound max SCK vs input clock — usage is outside this chunk, verify */
	int			fifo_size;		/* TX/RX FIFO depth, in entries */
};
126 
/* Keys into devtype_data[], one per supported SoC integration */
enum {
	LS1021A,
	LS1012A,
	LS1028A,
	LS1043A,
	LS1046A,
	LS2080A,
	LS2085A,
	LX2160A,
	MCF5441X,
	VF610,
};
139 
/*
 * Per-SoC settings. Entries annotated with the A-011218 DMA erratum are
 * pinned to XSPI (PIO) mode instead of DMA.
 */
static const struct fsl_dspi_devtype_data devtype_data[] = {
	[VF610] = {
		.trans_mode		= DSPI_DMA_MODE,
		.max_clock_factor	= 2,
		.fifo_size		= 4,
	},
	[LS1021A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
	},
	[LS1012A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 16,
	},
	[LS1028A] = {
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
	},
	[LS1043A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 16,
	},
	[LS1046A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 16,
	},
	[LS2080A] = {
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
	},
	[LS2085A] = {
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
	},
	[LX2160A] = {
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
	},
	[MCF5441X] = {
		.trans_mode		= DSPI_DMA_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 16,
	},
};
196 
/* dmaengine state: bounce buffer, channel and completion per direction */
struct fsl_dspi_dma {
	u32					*tx_dma_buf;	/* CPU view of the TX bounce buffer */
	struct dma_chan				*chan_tx;
	dma_addr_t				tx_dma_phys;	/* device view of the same buffer */
	struct completion			cmd_tx_complete;	/* signalled by TX DMA callback */
	struct dma_async_tx_descriptor		*tx_desc;

	u32					*rx_dma_buf;	/* CPU view of the RX bounce buffer */
	struct dma_chan				*chan_rx;
	dma_addr_t				rx_dma_phys;
	struct completion			cmd_rx_complete;	/* signalled by RX DMA callback */
	struct dma_async_tx_descriptor		*rx_desc;
};
210 
/* Driver-private state for one DSPI controller instance */
struct fsl_dspi {
	struct spi_controller			*ctlr;
	struct platform_device			*pdev;

	struct regmap				*regmap;	/* full register block */
	struct regmap				*regmap_pushr;	/* 16-bit halves of PUSHR (XSPI mode) */
	int					irq;		/* 0 selects polling mode (see !dspi->irq uses) */
	struct clk				*clk;

	/* State of the transfer currently in flight */
	struct spi_transfer			*cur_transfer;
	struct spi_message			*cur_msg;
	struct chip_data			*cur_chip;
	size_t					progress;	/* client words sent so far (for timestamping) */
	size_t					len;		/* bytes remaining in cur_transfer */
	const void				*tx;		/* TX cursor; NULL for RX-only transfers */
	void					*rx;		/* RX cursor; NULL for TX-only transfers */
	u16					tx_cmd;		/* PUSHR command half for this transfer */
	const struct fsl_dspi_devtype_data	*devtype_data;

	struct completion			xfer_done;	/* completed by the IRQ handler */

	struct fsl_dspi_dma			*dma;		/* NULL unless DMA was set up */

	/* Hardware word size chosen for the current chunk by dspi_setup_accel() */
	int					oper_word_size;
	int					oper_bits_per_word;

	/* Words written in the last FIFO/DMA round; read back by dspi_fifo_read() */
	int					words_in_flight;

	/*
	 * Offsets for CMD and TXDATA within SPI_PUSHR when accessed
	 * individually (in XSPI mode)
	 */
	int					pushr_cmd;
	int					pushr_tx;

	/* Buffer <-> hardware-word marshalling hooks, chosen by dspi_setup_accel() */
	void (*host_to_dev)(struct fsl_dspi *dspi, u32 *txdata);
	void (*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata);
};
249 
/*
 * Copy one TX word from the client buffer with no repacking: client
 * bits_per_word already matches the hardware word size.
 */
static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	if (dspi->oper_word_size == 1)
		*txdata = *(const u8 *)dspi->tx;
	else if (dspi->oper_word_size == 2)
		*txdata = *(const u16 *)dspi->tx;
	else if (dspi->oper_word_size == 4)
		*txdata = *(const u32 *)dspi->tx;

	dspi->tx += dspi->oper_word_size;
}
265 
/*
 * Store one received word into the client buffer with no repacking
 * (mirror of dspi_native_host_to_dev).
 */
static void dspi_native_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	if (dspi->oper_word_size == 1)
		*(u8 *)dspi->rx = rxdata;
	else if (dspi->oper_word_size == 2)
		*(u16 *)dspi->rx = rxdata;
	else if (dspi->oper_word_size == 4)
		*(u32 *)dspi->rx = rxdata;

	dspi->rx += dspi->oper_word_size;
}
281 
/* Pack four 8-bit client words into one 32-bit hardware word (big-endian) */
static void dspi_8on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	const u32 *src = dspi->tx;

	*txdata = cpu_to_be32(*src);
	dspi->tx += sizeof(*src);
}
287 
/* Unpack one 32-bit hardware word into four 8-bit client words */
static void dspi_8on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	u32 *dst = dspi->rx;

	*dst = be32_to_cpu(rxdata);
	dspi->rx += sizeof(*dst);
}
293 
/* Pack two 8-bit client words into one 16-bit hardware word (big-endian) */
static void dspi_8on16_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	const u16 *src = dspi->tx;

	*txdata = cpu_to_be16(*src);
	dspi->tx += sizeof(*src);
}
299 
/* Unpack one 16-bit hardware word into two 8-bit client words */
static void dspi_8on16_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	u16 *dst = dspi->rx;

	*dst = be16_to_cpu(rxdata);
	dspi->rx += sizeof(*dst);
}
305 
/* Pack two 16-bit client words into one 32-bit hardware word */
static void dspi_16on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	const u16 *src = dspi->tx;

	/* First u16 of the buffer occupies the upper half of the word */
	*txdata = ((u32)src[0] << 16) | src[1];
	dspi->tx += sizeof(u32);
}
314 
/*
 * Unpack one 32-bit hardware word into two 16-bit client words, the upper
 * half first — the mirror of dspi_16on32_host_to_dev. (The original local
 * names "hi"/"lo" were swapped relative to their contents; behavior is
 * identical.)
 */
static void dspi_16on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	u16 *dst = dspi->rx;

	dst[0] = rxdata >> 16;
	dst[1] = rxdata & 0xffff;
	dspi->rx += sizeof(u32);
}
324 
325 /*
326  * Pop one word from the TX buffer for pushing into the
327  * PUSHR register (TX FIFO)
328  */
dspi_pop_tx(struct fsl_dspi * dspi)329 static u32 dspi_pop_tx(struct fsl_dspi *dspi)
330 {
331 	u32 txdata = 0;
332 
333 	if (dspi->tx)
334 		dspi->host_to_dev(dspi, &txdata);
335 	dspi->len -= dspi->oper_word_size;
336 	return txdata;
337 }
338 
339 /* Prepare one TX FIFO entry (txdata plus cmd) */
dspi_pop_tx_pushr(struct fsl_dspi * dspi)340 static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
341 {
342 	u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
343 
344 	if (spi_controller_is_target(dspi->ctlr))
345 		return data;
346 
347 	if (dspi->len > 0)
348 		cmd |= SPI_PUSHR_CMD_CONT;
349 	return cmd << 16 | data;
350 }
351 
352 /* Push one word to the RX buffer from the POPR register (RX FIFO) */
dspi_push_rx(struct fsl_dspi * dspi,u32 rxdata)353 static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
354 {
355 	if (!dspi->rx)
356 		return;
357 	dspi->dev_to_host(dspi, rxdata);
358 }
359 
dspi_tx_dma_callback(void * arg)360 static void dspi_tx_dma_callback(void *arg)
361 {
362 	struct fsl_dspi *dspi = arg;
363 	struct fsl_dspi_dma *dma = dspi->dma;
364 
365 	complete(&dma->cmd_tx_complete);
366 }
367 
dspi_rx_dma_callback(void * arg)368 static void dspi_rx_dma_callback(void *arg)
369 {
370 	struct fsl_dspi *dspi = arg;
371 	struct fsl_dspi_dma *dma = dspi->dma;
372 	int i;
373 
374 	if (dspi->rx) {
375 		for (i = 0; i < dspi->words_in_flight; i++)
376 			dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
377 	}
378 
379 	complete(&dma->cmd_rx_complete);
380 }
381 
/*
 * Fill the TX bounce buffer with up to words_in_flight PUSHR-image words,
 * submit one TX and one RX descriptor, and (in host mode) wait for both to
 * complete.
 *
 * Returns 0 on success, -EIO if a descriptor could not be prepared,
 * -EINVAL on a submit error, or -ETIMEDOUT if a descriptor did not
 * complete within DMA_COMPLETION_TIMEOUT.
 */
static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
{
	struct device *dev = &dspi->pdev->dev;
	struct fsl_dspi_dma *dma = dspi->dma;
	int time_left;
	int i;

	/* Each bounce-buffer word is a full PUSHR image (CMD | TXDATA) */
	for (i = 0; i < dspi->words_in_flight; i++)
		dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);

	dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
					dma->tx_dma_phys,
					dspi->words_in_flight *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->tx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->tx_desc->callback = dspi_tx_dma_callback;
	dma->tx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	/* RX descriptor mirrors TX: same word count, device-to-memory */
	dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
					dma->rx_dma_phys,
					dspi->words_in_flight *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->rx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->rx_desc->callback = dspi_rx_dma_callback;
	dma->rx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	reinit_completion(&dspi->dma->cmd_rx_complete);
	reinit_completion(&dspi->dma->cmd_tx_complete);

	/* RX is issued before TX — presumably so no response word is lost */
	dma_async_issue_pending(dma->chan_rx);
	dma_async_issue_pending(dma->chan_tx);

	/*
	 * In target (slave) mode the remote host paces the transfer, so no
	 * timeout is meaningful; wait (interruptibly) on RX only.
	 */
	if (spi_controller_is_target(dspi->ctlr)) {
		wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
		return 0;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA tx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA rx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	return 0;
}
459 
460 static void dspi_setup_accel(struct fsl_dspi *dspi);
461 
/*
 * Run the whole current transfer through DMA, one FIFO-sized chunk at a
 * time. Returns 0 on success or the first error from the DMA round.
 */
static int dspi_dma_xfer(struct fsl_dspi *dspi)
{
	struct spi_message *message = dspi->cur_msg;
	struct device *dev = &dspi->pdev->dev;
	int ret = 0;

	/*
	 * dspi->len gets decremented by dspi_pop_tx_pushr in
	 * dspi_next_xfer_dma_submit
	 */
	while (dspi->len) {
		/* Figure out operational bits-per-word for this chunk */
		dspi_setup_accel(dspi);

		/* One DMA round moves at most fifo_size words */
		dspi->words_in_flight = dspi->len / dspi->oper_word_size;
		if (dspi->words_in_flight > dspi->devtype_data->fifo_size)
			dspi->words_in_flight = dspi->devtype_data->fifo_size;

		message->actual_length += dspi->words_in_flight *
					  dspi->oper_word_size;

		ret = dspi_next_xfer_dma_submit(dspi);
		if (ret) {
			dev_err(dev, "DMA transfer failed\n");
			break;
		}
	}

	return ret;
}
492 
/*
 * Acquire RX/TX dmaengine channels, allocate coherent bounce buffers, and
 * configure both channels to target POPR/PUSHR at physical address
 * @phy_addr. On success dspi->dma is set; on any failure the goto chain
 * releases everything acquired so far and dspi->dma stays NULL.
 *
 * Returns 0 on success or a negative errno.
 */
static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
{
	/* One 4-byte PUSHR image per FIFO entry — NOTE(review): "* 2" sizing presumably covers 2 bytes-per-u32 rounding or headroom; verify */
	int dma_bufsize = dspi->devtype_data->fifo_size * 2;
	struct device *dev = &dspi->pdev->dev;
	struct dma_slave_config cfg;
	struct fsl_dspi_dma *dma;
	int ret;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->chan_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(dma->chan_rx)) {
		return dev_err_probe(dev, PTR_ERR(dma->chan_rx),
			"rx dma channel not available\n");
	}

	dma->chan_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = PTR_ERR(dma->chan_tx);
		dev_err_probe(dev, ret, "tx dma channel not available\n");
		goto err_tx_channel;
	}

	/* Coherent allocations: the DMA engine reads/writes these directly */
	dma->tx_dma_buf = dma_alloc_coherent(dma->chan_tx->device->dev,
					     dma_bufsize, &dma->tx_dma_phys,
					     GFP_KERNEL);
	if (!dma->tx_dma_buf) {
		ret = -ENOMEM;
		goto err_tx_dma_buf;
	}

	dma->rx_dma_buf = dma_alloc_coherent(dma->chan_rx->device->dev,
					     dma_bufsize, &dma->rx_dma_phys,
					     GFP_KERNEL);
	if (!dma->rx_dma_buf) {
		ret = -ENOMEM;
		goto err_rx_dma_buf;
	}

	/* Both directions do single 4-byte accesses on POPR/PUSHR */
	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = phy_addr + SPI_POPR;
	cfg.dst_addr = phy_addr + SPI_PUSHR;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 1;
	cfg.dst_maxburst = 1;

	cfg.direction = DMA_DEV_TO_MEM;
	ret = dmaengine_slave_config(dma->chan_rx, &cfg);
	if (ret) {
		dev_err(dev, "can't configure rx dma channel\n");
		ret = -EINVAL;
		goto err_slave_config;
	}

	cfg.direction = DMA_MEM_TO_DEV;
	ret = dmaengine_slave_config(dma->chan_tx, &cfg);
	if (ret) {
		dev_err(dev, "can't configure tx dma channel\n");
		ret = -EINVAL;
		goto err_slave_config;
	}

	dspi->dma = dma;
	init_completion(&dma->cmd_tx_complete);
	init_completion(&dma->cmd_rx_complete);

	return 0;

	/* Unwind in reverse order of acquisition */
err_slave_config:
	dma_free_coherent(dma->chan_rx->device->dev,
			  dma_bufsize, dma->rx_dma_buf, dma->rx_dma_phys);
err_rx_dma_buf:
	dma_free_coherent(dma->chan_tx->device->dev,
			  dma_bufsize, dma->tx_dma_buf, dma->tx_dma_phys);
err_tx_dma_buf:
	dma_release_channel(dma->chan_tx);
err_tx_channel:
	dma_release_channel(dma->chan_rx);

	devm_kfree(dev, dma);
	dspi->dma = NULL;

	return ret;
}
580 
dspi_release_dma(struct fsl_dspi * dspi)581 static void dspi_release_dma(struct fsl_dspi *dspi)
582 {
583 	int dma_bufsize = dspi->devtype_data->fifo_size * 2;
584 	struct fsl_dspi_dma *dma = dspi->dma;
585 
586 	if (!dma)
587 		return;
588 
589 	if (dma->chan_tx) {
590 		dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize,
591 				  dma->tx_dma_buf, dma->tx_dma_phys);
592 		dma_release_channel(dma->chan_tx);
593 	}
594 
595 	if (dma->chan_rx) {
596 		dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize,
597 				  dma->rx_dma_buf, dma->rx_dma_phys);
598 		dma_release_channel(dma->chan_rx);
599 	}
600 }
601 
hz_to_spi_baud(char * pbr,char * br,int speed_hz,unsigned long clkrate)602 static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
603 			   unsigned long clkrate)
604 {
605 	/* Valid baud rate pre-scaler values */
606 	int pbr_tbl[4] = {2, 3, 5, 7};
607 	int brs[16] = {	2,	4,	6,	8,
608 			16,	32,	64,	128,
609 			256,	512,	1024,	2048,
610 			4096,	8192,	16384,	32768 };
611 	int scale_needed, scale, minscale = INT_MAX;
612 	int i, j;
613 
614 	scale_needed = clkrate / speed_hz;
615 	if (clkrate % speed_hz)
616 		scale_needed++;
617 
618 	for (i = 0; i < ARRAY_SIZE(brs); i++)
619 		for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) {
620 			scale = brs[i] * pbr_tbl[j];
621 			if (scale >= scale_needed) {
622 				if (scale < minscale) {
623 					minscale = scale;
624 					*br = i;
625 					*pbr = j;
626 				}
627 				break;
628 			}
629 		}
630 
631 	if (minscale == INT_MAX) {
632 		pr_warn("Can not find valid baud rate,speed_hz is %d,clkrate is %ld, we use the max prescaler value.\n",
633 			speed_hz, clkrate);
634 		*pbr = ARRAY_SIZE(pbr_tbl) - 1;
635 		*br =  ARRAY_SIZE(brs) - 1;
636 	}
637 }
638 
ns_delay_scale(char * psc,char * sc,int delay_ns,unsigned long clkrate)639 static void ns_delay_scale(char *psc, char *sc, int delay_ns,
640 			   unsigned long clkrate)
641 {
642 	int scale_needed, scale, minscale = INT_MAX;
643 	int pscale_tbl[4] = {1, 3, 5, 7};
644 	u32 remainder;
645 	int i, j;
646 
647 	scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
648 				   &remainder);
649 	if (remainder)
650 		scale_needed++;
651 
652 	for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++)
653 		for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) {
654 			scale = pscale_tbl[i] * (2 << j);
655 			if (scale >= scale_needed) {
656 				if (scale < minscale) {
657 					minscale = scale;
658 					*psc = i;
659 					*sc = j;
660 				}
661 				break;
662 			}
663 		}
664 
665 	if (minscale == INT_MAX) {
666 		pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value",
667 			delay_ns, clkrate);
668 		*psc = ARRAY_SIZE(pscale_tbl) - 1;
669 		*sc = SPI_CTAR_SCALE_BITS;
670 	}
671 }
672 
/* Write the 16-bit CMD half of PUSHR (XSPI mode) for the next FIFO batch */
static void dspi_pushr_cmd_write(struct fsl_dspi *dspi, u16 cmd)
{
	/*
	 * The only time when the PCS doesn't need continuation after this word
	 * is when it's last. We need to look ahead, because we actually call
	 * dspi_pop_tx (the function that decrements dspi->len) _after_
	 * dspi_pushr_cmd_write with XSPI mode. As for how much in advance? One
	 * word is enough. If there's more to transmit than that,
	 * dspi_xspi_write will know to split the FIFO writes in 2, and
	 * generate a new PUSHR command with the final word that will have PCS
	 * deasserted (not continued) here.
	 */
	if (dspi->len > dspi->oper_word_size)
		cmd |= SPI_PUSHR_CMD_CONT;
	regmap_write(dspi->regmap_pushr, dspi->pushr_cmd, cmd);
}
689 
/* Write one 16-bit TXDATA half of PUSHR (XSPI mode) into the TX FIFO */
static void dspi_pushr_txdata_write(struct fsl_dspi *dspi, u16 txdata)
{
	regmap_write(dspi->regmap_pushr, dspi->pushr_tx, txdata);
}
694 
/*
 * Fill the TX FIFO with num_words words: one CMD entry describing the
 * batch (via DTCP preload), then one 16-bit TXDATA write per word — or two
 * for 32-bit hardware words.
 */
static void dspi_xspi_fifo_write(struct fsl_dspi *dspi, int num_words)
{
	int num_bytes = num_words * dspi->oper_word_size;
	u16 tx_cmd = dspi->tx_cmd;

	/*
	 * When the PCS must de-assert at the end of the buffer (CONT not
	 * requested) and this batch reaches that end, mark the command as
	 * end-of-queue (EOQ).
	 * NOTE(review): the original comment here described an older
	 * "send one word less to force a split" scheme that no longer
	 * matches this code, which simply sets SPI_PUSHR_CMD_EOQ.
	 */
	if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT) && num_bytes == dspi->len)
		tx_cmd |= SPI_PUSHR_CMD_EOQ;

	/* Update CTARE: extended frame-size bits plus the DTCP preload count */
	regmap_write(dspi->regmap, SPI_CTARE(0),
		     SPI_FRAME_EBITS(dspi->oper_bits_per_word) |
		     SPI_CTARE_DTCP(num_words));

	/*
	 * Write the CMD FIFO entry first, and then the two
	 * corresponding TX FIFO entries (or one...).
	 */
	dspi_pushr_cmd_write(dspi, tx_cmd);

	/* Fill TX FIFO with as many transfers as possible */
	while (num_words--) {
		u32 data = dspi_pop_tx(dspi);

		dspi_pushr_txdata_write(dspi, data & 0xFFFF);
		/* 32-bit hardware words need a second TXDATA half */
		if (dspi->oper_bits_per_word > 16)
			dspi_pushr_txdata_write(dspi, data >> 16);
	}
}
731 
dspi_popr_read(struct fsl_dspi * dspi)732 static u32 dspi_popr_read(struct fsl_dspi *dspi)
733 {
734 	u32 rxdata = 0;
735 
736 	regmap_read(dspi->regmap, SPI_POPR, &rxdata);
737 	return rxdata;
738 }
739 
dspi_fifo_read(struct fsl_dspi * dspi)740 static void dspi_fifo_read(struct fsl_dspi *dspi)
741 {
742 	int num_fifo_entries = dspi->words_in_flight;
743 
744 	/* Read one FIFO entry and push to rx buffer */
745 	while (num_fifo_entries--)
746 		dspi_push_rx(dspi, dspi_popr_read(dspi));
747 }
748 
/*
 * Pick the largest hardware word size (oper_bits_per_word) allowed by the
 * remaining buffer length and the transfer's bits_per_word, install the
 * matching host<->device marshalling callbacks, and program CTAR(0).
 * "Acceleration" packs several 8/16-bit client words into one wider
 * hardware word.
 */
static void dspi_setup_accel(struct fsl_dspi *dspi)
{
	struct spi_transfer *xfer = dspi->cur_transfer;
	bool odd = !!(dspi->len & 1);

	/* No accel for frames not multiple of 8 bits at the moment */
	if (xfer->bits_per_word % 8)
		goto no_accel;

	/* Short buffers that fit in the FIFO in one go use a fixed word size */
	if (!odd && dspi->len <= dspi->devtype_data->fifo_size * 2) {
		dspi->oper_bits_per_word = 16;
	} else if (odd && dspi->len <= dspi->devtype_data->fifo_size) {
		dspi->oper_bits_per_word = 8;
	} else {
		/* Start off with maximum supported by hardware */
		if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
			dspi->oper_bits_per_word = 32;
		else
			dspi->oper_bits_per_word = 16;

		/*
		 * And go down only if the buffer can't be sent with
		 * words this big
		 */
		do {
			if (dspi->len >= DIV_ROUND_UP(dspi->oper_bits_per_word, 8))
				break;

			dspi->oper_bits_per_word /= 2;
		} while (dspi->oper_bits_per_word > 8);
	}

	/* Select the packing helpers matching (client bpw, hardware bpw) */
	if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 32) {
		dspi->dev_to_host = dspi_8on32_dev_to_host;
		dspi->host_to_dev = dspi_8on32_host_to_dev;
	} else if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 16) {
		dspi->dev_to_host = dspi_8on16_dev_to_host;
		dspi->host_to_dev = dspi_8on16_host_to_dev;
	} else if (xfer->bits_per_word == 16 && dspi->oper_bits_per_word == 32) {
		dspi->dev_to_host = dspi_16on32_dev_to_host;
		dspi->host_to_dev = dspi_16on32_host_to_dev;
	} else {
no_accel:
		dspi->dev_to_host = dspi_native_dev_to_host;
		dspi->host_to_dev = dspi_native_host_to_dev;
		dspi->oper_bits_per_word = xfer->bits_per_word;
	}

	dspi->oper_word_size = DIV_ROUND_UP(dspi->oper_bits_per_word, 8);

	/*
	 * Update CTAR here (code is common for XSPI and DMA modes).
	 * We will update CTARE in the portion specific to XSPI, when we
	 * also know the preload value (DTCP).
	 */
	regmap_write(dspi->regmap, SPI_CTAR(0),
		     dspi->cur_chip->ctar_val |
		     SPI_FRAME_BITS(dspi->oper_bits_per_word));
}
808 
/*
 * Start one FIFO round of the current transfer: choose the operational
 * word size, cap the batch to the FIFO depth, account for progress, then
 * push CMD + data into the FIFO. The matching dspi_fifo_read() runs when
 * completion is signalled (IRQ or poll).
 */
static void dspi_fifo_write(struct fsl_dspi *dspi)
{
	int num_fifo_entries = dspi->devtype_data->fifo_size;
	struct spi_transfer *xfer = dspi->cur_transfer;
	struct spi_message *msg = dspi->cur_msg;
	int num_words, num_bytes;

	dspi_setup_accel(dspi);

	/* In XSPI mode each 32-bit word occupies 2 TX FIFO entries */
	if (dspi->oper_word_size == 4)
		num_fifo_entries /= 2;

	/*
	 * Integer division intentionally trims off odd (or non-multiple of 4)
	 * numbers of bytes at the end of the buffer, which will be sent next
	 * time using a smaller oper_word_size.
	 */
	num_words = dspi->len / dspi->oper_word_size;
	if (num_words > num_fifo_entries)
		num_words = num_fifo_entries;

	/* Update total number of bytes that were transferred */
	num_bytes = num_words * dspi->oper_word_size;
	msg->actual_length += num_bytes;
	/* progress counts client words (bytes / client word size) */
	dspi->progress += num_bytes / DIV_ROUND_UP(xfer->bits_per_word, 8);

	/*
	 * Update shared variable for use in the next interrupt (both in
	 * dspi_fifo_read and in dspi_fifo_write).
	 */
	dspi->words_in_flight = num_words;

	spi_take_timestamp_pre(dspi->ctlr, xfer, dspi->progress, !dspi->irq);

	dspi_xspi_fifo_write(dspi, num_words);
	/*
	 * Everything after this point is in a potential race with the next
	 * interrupt, so we must never use dspi->words_in_flight again since it
	 * might already be modified by the next dspi_fifo_write.
	 */

	spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
				dspi->progress, !dspi->irq);
}
854 
dspi_rxtx(struct fsl_dspi * dspi)855 static int dspi_rxtx(struct fsl_dspi *dspi)
856 {
857 	dspi_fifo_read(dspi);
858 
859 	if (!dspi->len)
860 		/* Success! */
861 		return 0;
862 
863 	dspi_fifo_write(dspi);
864 
865 	return -EINPROGRESS;
866 }
867 
dspi_poll(struct fsl_dspi * dspi)868 static int dspi_poll(struct fsl_dspi *dspi)
869 {
870 	int tries = 1000;
871 	u32 spi_sr;
872 
873 	do {
874 		regmap_read(dspi->regmap, SPI_SR, &spi_sr);
875 		regmap_write(dspi->regmap, SPI_SR, spi_sr);
876 
877 		if (spi_sr & SPI_SR_CMDTCF)
878 			break;
879 	} while (--tries);
880 
881 	if (!tries)
882 		return -ETIMEDOUT;
883 
884 	return dspi_rxtx(dspi);
885 }
886 
dspi_interrupt(int irq,void * dev_id)887 static irqreturn_t dspi_interrupt(int irq, void *dev_id)
888 {
889 	struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
890 	u32 spi_sr;
891 
892 	regmap_read(dspi->regmap, SPI_SR, &spi_sr);
893 	regmap_write(dspi->regmap, SPI_SR, spi_sr);
894 
895 	if (!(spi_sr & SPI_SR_CMDTCF))
896 		return IRQ_NONE;
897 
898 	if (dspi_rxtx(dspi) == 0)
899 		complete(&dspi->xfer_done);
900 
901 	return IRQ_HANDLED;
902 }
903 
/* Assert a GPIO chip select (no-op without a GPIO CS or if already asserted) */
static void dspi_assert_cs(struct spi_device *spi, bool *cs)
{
	struct gpio_desc *gpio = spi_get_csgpiod(spi, 0);

	if (!gpio || *cs)
		return;

	gpiod_set_value_cansleep(gpio, true);
	*cs = true;
}
912 
/* De-assert a GPIO chip select (no-op without a GPIO CS or if not asserted) */
static void dspi_deassert_cs(struct spi_device *spi, bool *cs)
{
	struct gpio_desc *gpio = spi_get_csgpiod(spi, 0);

	if (!gpio || !*cs)
		return;

	gpiod_set_value_cansleep(gpio, false);
	*cs = false;
}
921 
/*
 * spi_controller->transfer_one_message() implementation.
 *
 * Walks the message's transfer list, building the PUSHR command word
 * (CTAS, PCS, CONT) per transfer, then runs each transfer via DMA or via
 * the FIFO path (IRQ-driven or polled). The module is taken out of HALT
 * for the duration and halted again afterwards, unless the last transfer
 * requested cs_change.
 */
static int dspi_transfer_one_message(struct spi_controller *ctlr,
				     struct spi_message *message)
{
	struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = message->spi;
	struct spi_transfer *transfer;
	bool cs = false;
	int status = 0;
	u32 val = 0;
	bool cs_change = false;

	message->actual_length = 0;

	/* Put DSPI in running mode if halted. */
	regmap_read(dspi->regmap, SPI_MCR, &val);
	if (val & SPI_MCR_HALT) {
		regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, 0);
		/* Busy-wait until the module reports running (TXRXS set) */
		while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
		       !(val & SPI_SR_TXRXS))
			;
	}

	list_for_each_entry(transfer, &message->transfers, transfer_list) {
		dspi->cur_transfer = transfer;
		dspi->cur_msg = message;
		dspi->cur_chip = spi_get_ctldata(spi);

		dspi_assert_cs(spi, &cs);

		/* Prepare command word for CMD FIFO */
		dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0);
		/* Native (non-GPIO) chip selects are driven via the PCS field */
		if (!spi_get_csgpiod(spi, 0))
			dspi->tx_cmd |= SPI_PUSHR_CMD_PCS(spi_get_chipselect(spi, 0));

		if (list_is_last(&dspi->cur_transfer->transfer_list,
				 &dspi->cur_msg->transfers)) {
			/* Leave PCS activated after last transfer when
			 * cs_change is set.
			 */
			if (transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		} else {
			/* Keep PCS active between transfers in same message
			 * when cs_change is not set, and de-activate PCS
			 * between transfers in the same message when
			 * cs_change is set.
			 */
			if (!transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		}

		cs_change = transfer->cs_change;
		dspi->tx = transfer->tx_buf;
		dspi->rx = transfer->rx_buf;
		dspi->len = transfer->len;
		dspi->progress = 0;

		/* Start from clean FIFOs and a clean status register */
		regmap_update_bits(dspi->regmap, SPI_MCR,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);

		regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);

		spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
				       dspi->progress, !dspi->irq);

		if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
			status = dspi_dma_xfer(dspi);
		} else {
			dspi_fifo_write(dspi);

			if (dspi->irq) {
				wait_for_completion(&dspi->xfer_done);
				reinit_completion(&dspi->xfer_done);
			} else {
				/* Polling mode: spin until done or error */
				do {
					status = dspi_poll(dspi);
				} while (status == -EINPROGRESS);
			}
		}
		if (status)
			break;

		spi_transfer_delay_exec(transfer);

		/* Drop a GPIO chip select unless PCS continuation was requested */
		if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT))
			dspi_deassert_cs(spi, &cs);
	}

	if (status || !cs_change) {
		/* Put DSPI in stop mode */
		regmap_update_bits(dspi->regmap, SPI_MCR,
				   SPI_MCR_HALT, SPI_MCR_HALT);
		/* Busy-wait until the module reports stopped (TXRXS clear) */
		while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
		       val & SPI_SR_TXRXS)
			;
	}

	message->status = status;
	spi_finalize_current_message(ctlr);

	return status;
}
1025 
/*
 * Per-device setup callback: translate the spi_device's mode, maximum
 * speed and chip-select timing requirements into a CTAR register value,
 * cached in the per-device chip data.  May be called repeatedly for the
 * same device; chip data is allocated only on the first invocation.
 */
static int dspi_setup(struct spi_device *spi)
{
	struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
	u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz);
	unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
	u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4);
	u32 cs_sck_delay = 0, sck_cs_delay = 0;
	struct fsl_dspi_platform_data *pdata;
	unsigned char pasc = 0, asc = 0;
	struct gpio_desc *gpio_cs;
	struct chip_data *chip;
	unsigned long clkrate;
	bool cs = true;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
	}

	pdata = dev_get_platdata(&dspi->pdev->dev);

	/* CS setup/hold delays come from platform data or, without it, DT. */
	if (!pdata) {
		of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
				     &cs_sck_delay);

		of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
				     &sck_cs_delay);
	} else {
		cs_sck_delay = pdata->cs_sck_delay;
		sck_cs_delay = pdata->sck_cs_delay;
	}

	/* Since tCSC and tASC apply to continuous transfers too, avoid SCK
	 * glitches of half a cycle by never allowing tCSC + tASC to go below
	 * half a SCK period.
	 */
	if (cs_sck_delay < quarter_period_ns)
		cs_sck_delay = quarter_period_ns;
	if (sck_cs_delay < quarter_period_ns)
		sck_cs_delay = quarter_period_ns;

	dev_dbg(&spi->dev,
		"DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n",
		cs_sck_delay, sck_cs_delay);

	clkrate = clk_get_rate(dspi->clk);
	/* Pick baud-rate prescaler/scaler for the requested max speed. */
	hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);

	/* Set PCS to SCK delay scale values */
	ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate);

	/* Set After SCK delay scale values */
	ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);

	chip->ctar_val = 0;
	if (spi->mode & SPI_CPOL)
		chip->ctar_val |= SPI_CTAR_CPOL;
	if (spi->mode & SPI_CPHA)
		chip->ctar_val |= SPI_CTAR_CPHA;

	/* Timing/baud fields are only programmed when operating as host. */
	if (!spi_controller_is_target(dspi->ctlr)) {
		chip->ctar_val |= SPI_CTAR_PCSSCK(pcssck) |
				  SPI_CTAR_CSSCK(cssck) |
				  SPI_CTAR_PASC(pasc) |
				  SPI_CTAR_ASC(asc) |
				  SPI_CTAR_PBR(pbr) |
				  SPI_CTAR_BR(br);

		if (spi->mode & SPI_LSB_FIRST)
			chip->ctar_val |= SPI_CTAR_LSBFE;
	}

	/* GPIO chip selects start out driven to their inactive level. */
	gpio_cs = spi_get_csgpiod(spi, 0);
	if (gpio_cs)
		gpiod_direction_output(gpio_cs, false);

	dspi_deassert_cs(spi, &cs);

	spi_set_ctldata(spi, chip);

	return 0;
}
1111 
dspi_cleanup(struct spi_device * spi)1112 static void dspi_cleanup(struct spi_device *spi)
1113 {
1114 	struct chip_data *chip = spi_get_ctldata(spi);
1115 
1116 	dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
1117 		spi->controller->bus_num, spi_get_chipselect(spi, 0));
1118 
1119 	kfree(chip);
1120 }
1121 
/*
 * DT compatible strings mapped to per-SoC devtype_data; also drives
 * module autoloading via MODULE_DEVICE_TABLE below.
 */
static const struct of_device_id fsl_dspi_dt_ids[] = {
	{
		.compatible = "fsl,vf610-dspi",
		.data = &devtype_data[VF610],
	}, {
		.compatible = "fsl,ls1021a-v1.0-dspi",
		.data = &devtype_data[LS1021A],
	}, {
		.compatible = "fsl,ls1012a-dspi",
		.data = &devtype_data[LS1012A],
	}, {
		.compatible = "fsl,ls1028a-dspi",
		.data = &devtype_data[LS1028A],
	}, {
		.compatible = "fsl,ls1043a-dspi",
		.data = &devtype_data[LS1043A],
	}, {
		.compatible = "fsl,ls1046a-dspi",
		.data = &devtype_data[LS1046A],
	}, {
		.compatible = "fsl,ls2080a-dspi",
		.data = &devtype_data[LS2080A],
	}, {
		.compatible = "fsl,ls2085a-dspi",
		.data = &devtype_data[LS2085A],
	}, {
		.compatible = "fsl,lx2160a-dspi",
		.data = &devtype_data[LX2160A],
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
1154 
#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: mask the IRQ first so a late interrupt cannot run
 * against a gated module clock, stop the SPI message queue, then cut
 * the clock and move the pins to their sleep state.
 */
static int dspi_suspend(struct device *dev)
{
	struct fsl_dspi *dspi = dev_get_drvdata(dev);
	int ret;

	if (dspi->irq)
		disable_irq(dspi->irq);

	/*
	 * Propagate a failure to quiesce the queue instead of silently
	 * powering down with transfers potentially still in flight.
	 */
	ret = spi_controller_suspend(dspi->ctlr);
	if (ret) {
		if (dspi->irq)
			enable_irq(dspi->irq);
		return ret;
	}

	clk_disable_unprepare(dspi->clk);

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

/*
 * System resume: reverse of dspi_suspend() — restore pins, re-enable
 * the clock, restart the message queue, then unmask the IRQ.
 */
static int dspi_resume(struct device *dev)
{
	struct fsl_dspi *dspi = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		return ret;

	/* Check the queue restart too, mirroring the suspend path. */
	ret = spi_controller_resume(dspi->ctlr);
	if (ret) {
		clk_disable_unprepare(dspi->clk);
		return ret;
	}

	if (dspi->irq)
		enable_irq(dspi->irq);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
1189 
/*
 * Registers that may be read/written at all; offsets outside these
 * ranges are treated as holes by regmap.
 */
static const struct regmap_range dspi_yes_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_MCR),
	regmap_reg_range(SPI_TCR, SPI_CTAR(3)),
	regmap_reg_range(SPI_SR, SPI_TXFR3),
	regmap_reg_range(SPI_RXFR0, SPI_RXFR3),
	regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)),
	regmap_reg_range(SPI_SREX, SPI_SREX),
};

static const struct regmap_access_table dspi_access_table = {
	.yes_ranges	= dspi_yes_ranges,
	.n_yes_ranges	= ARRAY_SIZE(dspi_yes_ranges),
};
1203 
/*
 * Registers whose contents change under hardware control and therefore
 * must never be cached by regmap.
 */
static const struct regmap_range dspi_volatile_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_TCR),
	regmap_reg_range(SPI_SR, SPI_SR),
	regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
};

static const struct regmap_access_table dspi_volatile_table = {
	.yes_ranges	= dspi_volatile_ranges,
	.n_yes_ranges	= ARRAY_SIZE(dspi_volatile_ranges),
};

/* Register map for controllers without XSPI (32-bit regs up to 0x88). */
static const struct regmap_config dspi_regmap_config = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
	.max_register	= 0x88,
	.volatile_table	= &dspi_volatile_table,
	.rd_table	= &dspi_access_table,
	.wr_table	= &dspi_access_table,
};
1224 
/* Volatile ranges for XSPI controllers: same as above plus SREX. */
static const struct regmap_range dspi_xspi_volatile_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_TCR),
	regmap_reg_range(SPI_SR, SPI_SR),
	regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
	regmap_reg_range(SPI_SREX, SPI_SREX),
};

static const struct regmap_access_table dspi_xspi_volatile_table = {
	.yes_ranges	= dspi_xspi_volatile_ranges,
	.n_yes_ranges	= ARRAY_SIZE(dspi_xspi_volatile_ranges),
};

/*
 * [0] is the full 32-bit register map for XSPI controllers.
 * [1] is a secondary 16-bit map placed at SPI_PUSHR (see dspi_probe()),
 * exposing the two 16-bit halves of PUSHR at offsets 0x0 and 0x2 so the
 * CMD and TX data portions can be written independently.
 */
static const struct regmap_config dspi_xspi_regmap_config[] = {
	{
		.reg_bits	= 32,
		.val_bits	= 32,
		.reg_stride	= 4,
		.max_register	= 0x13c,
		.volatile_table	= &dspi_xspi_volatile_table,
		.rd_table	= &dspi_access_table,
		.wr_table	= &dspi_access_table,
	},
	{
		.name		= "pushr",
		.reg_bits	= 16,
		.val_bits	= 16,
		.reg_stride	= 2,
		.max_register	= 0x2,
	},
};
1255 
/*
 * One-time controller initialization: program MCR (module halted) and
 * arm the interrupt/DMA request sources matching the transfer mode.
 * Returns -EINVAL for an unknown trans_mode.
 */
static int dspi_init(struct fsl_dspi *dspi)
{
	unsigned int mcr;

	/* Set idle states for all chip select signals to high */
	mcr = SPI_MCR_PCSIS(GENMASK(dspi->ctlr->max_native_cs - 1, 0));

	if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
		mcr |= SPI_MCR_XSPI;
	if (!spi_controller_is_target(dspi->ctlr))
		mcr |= SPI_MCR_HOST;

	/* Keep the module halted after init; the transfer path runs it. */
	mcr |= SPI_MCR_HALT;

	regmap_write(dspi->regmap, SPI_MCR, mcr);
	/* Discard any stale status flags left from before probe. */
	regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);

	switch (dspi->devtype_data->trans_mode) {
	case DSPI_XSPI_MODE:
		regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE);
		break;
	case DSPI_DMA_MODE:
		/* Route TX-fill and RX-drain FIFO flags to the DMA engine. */
		regmap_write(dspi->regmap, SPI_RSER,
			     SPI_RSER_TFFFE | SPI_RSER_TFFFD |
			     SPI_RSER_RFDFE | SPI_RSER_RFDFD);
		break;
	default:
		dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
			dspi->devtype_data->trans_mode);
		return -EINVAL;
	}

	return 0;
}
1290 
dspi_target_abort(struct spi_controller * host)1291 static int dspi_target_abort(struct spi_controller *host)
1292 {
1293 	struct fsl_dspi *dspi = spi_controller_get_devdata(host);
1294 
1295 	/*
1296 	 * Terminate all pending DMA transactions for the SPI working
1297 	 * in TARGET mode.
1298 	 */
1299 	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
1300 		dmaengine_terminate_sync(dspi->dma->chan_rx);
1301 		dmaengine_terminate_sync(dspi->dma->chan_tx);
1302 	}
1303 
1304 	/* Clear the internal DSPI RX and TX FIFO buffers */
1305 	regmap_update_bits(dspi->regmap, SPI_MCR,
1306 			   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
1307 			   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
1308 
1309 	return 0;
1310 }
1311 
/*
 * Probe: allocate the SPI controller, determine the SoC flavour
 * (platform data for Coldfire, OF match data otherwise), map the
 * registers through regmap, acquire clock/IRQ/DMA resources and
 * register with the SPI core.  Errors unwind via the goto chain at
 * the bottom, in reverse order of acquisition.
 */
static int dspi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct regmap_config *regmap_config;
	struct fsl_dspi_platform_data *pdata;
	struct spi_controller *ctlr;
	int ret, cs_num, bus_num = -1;
	struct fsl_dspi *dspi;
	struct resource *res;
	void __iomem *base;
	bool big_endian;

	dspi = devm_kzalloc(&pdev->dev, sizeof(*dspi), GFP_KERNEL);
	if (!dspi)
		return -ENOMEM;

	ctlr = spi_alloc_host(&pdev->dev, 0);
	if (!ctlr)
		return -ENOMEM;

	spi_controller_set_devdata(ctlr, dspi);
	platform_set_drvdata(pdev, dspi);

	dspi->pdev = pdev;
	dspi->ctlr = ctlr;

	ctlr->setup = dspi_setup;
	ctlr->transfer_one_message = dspi_transfer_one_message;
	ctlr->dev.of_node = pdev->dev.of_node;

	ctlr->cleanup = dspi_cleanup;
	ctlr->target_abort = dspi_target_abort;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
	ctlr->use_gpio_descriptors = true;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		ctlr->num_chipselect = ctlr->max_native_cs = pdata->cs_num;
		ctlr->bus_num = pdata->bus_num;

		/* Only Coldfire uses platform data */
		dspi->devtype_data = &devtype_data[MCF5441X];
		big_endian = true;
	} else {

		ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
			goto out_ctlr_put;
		}
		ctlr->num_chipselect = ctlr->max_native_cs = cs_num;

		/* bus_num stays -1 (dynamic) if the property is absent. */
		of_property_read_u32(np, "bus-num", &bus_num);
		ctlr->bus_num = bus_num;

		if (of_property_read_bool(np, "spi-slave"))
			ctlr->target = true;

		dspi->devtype_data = of_device_get_match_data(&pdev->dev);
		if (!dspi->devtype_data) {
			dev_err(&pdev->dev, "can't get devtype_data\n");
			ret = -EFAULT;
			goto out_ctlr_put;
		}

		big_endian = of_device_is_big_endian(np);
	}
	/* Byte offsets of the 16-bit CMD and TXDATA halves within PUSHR. */
	if (big_endian) {
		dspi->pushr_cmd = 0;
		dspi->pushr_tx = 2;
	} else {
		dspi->pushr_cmd = 2;
		dspi->pushr_tx = 0;
	}

	/* XSPI mode extends the supported word size from 16 to 32 bits. */
	if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
		ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	else
		ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out_ctlr_put;
	}

	if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
		regmap_config = &dspi_xspi_regmap_config[0];
	else
		regmap_config = &dspi_regmap_config;
	dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base, regmap_config);
	if (IS_ERR(dspi->regmap)) {
		dev_err(&pdev->dev, "failed to init regmap: %ld\n",
				PTR_ERR(dspi->regmap));
		ret = PTR_ERR(dspi->regmap);
		goto out_ctlr_put;
	}

	/* Extra 16-bit regmap so the PUSHR halves can be written separately. */
	if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) {
		dspi->regmap_pushr = devm_regmap_init_mmio(
			&pdev->dev, base + SPI_PUSHR,
			&dspi_xspi_regmap_config[1]);
		if (IS_ERR(dspi->regmap_pushr)) {
			dev_err(&pdev->dev,
				"failed to init pushr regmap: %ld\n",
				PTR_ERR(dspi->regmap_pushr));
			ret = PTR_ERR(dspi->regmap_pushr);
			goto out_ctlr_put;
		}
	}

	dspi->clk = devm_clk_get(&pdev->dev, "dspi");
	if (IS_ERR(dspi->clk)) {
		ret = PTR_ERR(dspi->clk);
		dev_err(&pdev->dev, "unable to get clock\n");
		goto out_ctlr_put;
	}
	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		goto out_ctlr_put;

	ret = dspi_init(dspi);
	if (ret)
		goto out_clk_put;

	/*
	 * NOTE(review): any negative value from platform_get_irq()
	 * (including -EPROBE_DEFER) is silently folded into poll mode
	 * here - confirm that is intended.
	 */
	dspi->irq = platform_get_irq(pdev, 0);
	if (dspi->irq <= 0) {
		dev_info(&pdev->dev,
			 "can't get platform irq, using poll mode\n");
		dspi->irq = 0;
		goto poll_mode;
	}

	init_completion(&dspi->xfer_done);

	ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL,
				   IRQF_SHARED, pdev->name, dspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
		goto out_clk_put;
	}

poll_mode:

	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
		ret = dspi_request_dma(dspi, res->start);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get dma channels\n");
			goto out_free_irq;
		}
	}

	ctlr->max_speed_hz =
		clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;

	/* PTP system timestamping is only advertised for non-DMA modes. */
	if (dspi->devtype_data->trans_mode != DSPI_DMA_MODE)
		ctlr->ptp_sts_supported = true;

	ret = spi_register_controller(ctlr);
	if (ret != 0) {
		dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
		goto out_release_dma;
	}

	return ret;

out_release_dma:
	dspi_release_dma(dspi);
out_free_irq:
	if (dspi->irq)
		free_irq(dspi->irq, dspi);
out_clk_put:
	clk_disable_unprepare(dspi->clk);
out_ctlr_put:
	spi_controller_put(ctlr);

	return ret;
}
1490 
/*
 * Teardown in reverse order of probe: unregister from the SPI core
 * first so no new messages arrive, then quiesce the hardware and
 * release the DMA channels, IRQ and clock.
 */
static void dspi_remove(struct platform_device *pdev)
{
	struct fsl_dspi *dspi = platform_get_drvdata(pdev);

	/* Disconnect from the SPI framework */
	spi_unregister_controller(dspi->ctlr);

	/* Disable RX and TX */
	regmap_update_bits(dspi->regmap, SPI_MCR,
			   SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
			   SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF);

	/* Stop Running */
	regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);

	dspi_release_dma(dspi);
	if (dspi->irq)
		free_irq(dspi->irq, dspi);
	clk_disable_unprepare(dspi->clk);
}
1511 
/* Quiesce the controller on system shutdown; same path as remove. */
static void dspi_shutdown(struct platform_device *pdev)
{
	dspi_remove(pdev);
}
1516 
static struct platform_driver fsl_dspi_driver = {
	.driver.name		= DRIVER_NAME,
	.driver.of_match_table	= fsl_dspi_dt_ids,
	/* NOTE(review): .owner is also filled in by the platform core
	 * via module_platform_driver(); this assignment is redundant.
	 */
	.driver.owner		= THIS_MODULE,
	.driver.pm		= &dspi_pm,
	.probe			= dspi_probe,
	.remove_new		= dspi_remove,
	.shutdown		= dspi_shutdown,
};
module_platform_driver(fsl_dspi_driver);

MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
1531