xref: /openbmc/linux/drivers/spi/spi-fsl-dspi.c (revision 9be08a27)
1 // SPDX-License-Identifier: GPL-2.0+
2 //
3 // Copyright 2013 Freescale Semiconductor, Inc.
4 //
5 // Freescale DSPI driver
6 // This file contains a driver for the Freescale DSPI
7 
8 #include <linux/clk.h>
9 #include <linux/delay.h>
10 #include <linux/dmaengine.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/err.h>
13 #include <linux/errno.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/kernel.h>
17 #include <linux/math64.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/of_device.h>
21 #include <linux/pinctrl/consumer.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/regmap.h>
25 #include <linux/sched.h>
26 #include <linux/spi/spi.h>
27 #include <linux/spi/spi-fsl-dspi.h>
28 #include <linux/spi/spi_bitbang.h>
29 #include <linux/time.h>
30 
31 #define DRIVER_NAME "fsl-dspi"
32 
33 #define DSPI_FIFO_SIZE			4
34 #define DSPI_DMA_BUFSIZE		(DSPI_FIFO_SIZE * 1024)
35 
36 #define SPI_MCR		0x00
37 #define SPI_MCR_MASTER		(1 << 31)
38 #define SPI_MCR_PCSIS		(0x3F << 16)
39 #define SPI_MCR_CLR_TXF	(1 << 11)
40 #define SPI_MCR_CLR_RXF	(1 << 10)
41 #define SPI_MCR_XSPI		(1 << 3)
42 
43 #define SPI_TCR			0x08
44 #define SPI_TCR_GET_TCNT(x)	(((x) & 0xffff0000) >> 16)
45 
46 #define SPI_CTAR(x)		(0x0c + (((x) & 0x3) * 4))
47 #define SPI_CTAR_FMSZ(x)	(((x) & 0x0000000f) << 27)
48 #define SPI_CTAR_CPOL(x)	((x) << 26)
49 #define SPI_CTAR_CPHA(x)	((x) << 25)
50 #define SPI_CTAR_LSBFE(x)	((x) << 24)
51 #define SPI_CTAR_PCSSCK(x)	(((x) & 0x00000003) << 22)
52 #define SPI_CTAR_PASC(x)	(((x) & 0x00000003) << 20)
53 #define SPI_CTAR_PDT(x)	(((x) & 0x00000003) << 18)
54 #define SPI_CTAR_PBR(x)	(((x) & 0x00000003) << 16)
55 #define SPI_CTAR_CSSCK(x)	(((x) & 0x0000000f) << 12)
56 #define SPI_CTAR_ASC(x)	(((x) & 0x0000000f) << 8)
57 #define SPI_CTAR_DT(x)		(((x) & 0x0000000f) << 4)
58 #define SPI_CTAR_BR(x)		((x) & 0x0000000f)
59 #define SPI_CTAR_SCALE_BITS	0xf
60 
61 #define SPI_CTAR0_SLAVE	0x0c
62 
63 #define SPI_SR			0x2c
64 #define SPI_SR_EOQF		0x10000000
65 #define SPI_SR_TCFQF		0x80000000
66 #define SPI_SR_CLEAR		0xdaad0000
67 
68 #define SPI_RSER_TFFFE		BIT(25)
69 #define SPI_RSER_TFFFD		BIT(24)
70 #define SPI_RSER_RFDFE		BIT(17)
71 #define SPI_RSER_RFDFD		BIT(16)
72 
73 #define SPI_RSER		0x30
74 #define SPI_RSER_EOQFE		0x10000000
75 #define SPI_RSER_TCFQE		0x80000000
76 
77 #define SPI_PUSHR		0x34
78 #define SPI_PUSHR_CMD_CONT	(1 << 15)
79 #define SPI_PUSHR_CONT		(SPI_PUSHR_CMD_CONT << 16)
80 #define SPI_PUSHR_CMD_CTAS(x)	(((x) & 0x0003) << 12)
81 #define SPI_PUSHR_CTAS(x)	(SPI_PUSHR_CMD_CTAS(x) << 16)
82 #define SPI_PUSHR_CMD_EOQ	(1 << 11)
83 #define SPI_PUSHR_EOQ		(SPI_PUSHR_CMD_EOQ << 16)
84 #define SPI_PUSHR_CMD_CTCNT	(1 << 10)
85 #define SPI_PUSHR_CTCNT		(SPI_PUSHR_CMD_CTCNT << 16)
86 #define SPI_PUSHR_CMD_PCS(x)	((1 << x) & 0x003f)
87 #define SPI_PUSHR_PCS(x)	(SPI_PUSHR_CMD_PCS(x) << 16)
88 #define SPI_PUSHR_TXDATA(x)	((x) & 0x0000ffff)
89 
90 #define SPI_PUSHR_SLAVE	0x34
91 
92 #define SPI_POPR		0x38
93 #define SPI_POPR_RXDATA(x)	((x) & 0x0000ffff)
94 
95 #define SPI_TXFR0		0x3c
96 #define SPI_TXFR1		0x40
97 #define SPI_TXFR2		0x44
98 #define SPI_TXFR3		0x48
99 #define SPI_RXFR0		0x7c
100 #define SPI_RXFR1		0x80
101 #define SPI_RXFR2		0x84
102 #define SPI_RXFR3		0x88
103 
104 #define SPI_CTARE(x)		(0x11c + (((x) & 0x3) * 4))
105 #define SPI_CTARE_FMSZE(x)	(((x) & 0x1) << 16)
106 #define SPI_CTARE_DTCP(x)	((x) & 0x7ff)
107 
108 #define SPI_SREX		0x13c
109 
110 #define SPI_FRAME_BITS(bits)	SPI_CTAR_FMSZ((bits) - 1)
111 #define SPI_FRAME_BITS_MASK	SPI_CTAR_FMSZ(0xf)
112 #define SPI_FRAME_BITS_16	SPI_CTAR_FMSZ(0xf)
113 #define SPI_FRAME_BITS_8	SPI_CTAR_FMSZ(0x7)
114 
115 #define SPI_FRAME_EBITS(bits)	SPI_CTARE_FMSZE(((bits) - 1) >> 4)
116 #define SPI_FRAME_EBITS_MASK	SPI_CTARE_FMSZE(1)
117 
118 /* Register offsets for regmap_pushr */
119 #define PUSHR_CMD		0x0
120 #define PUSHR_TX		0x2
121 
122 #define SPI_CS_INIT		0x01
123 #define SPI_CS_ASSERT		0x02
124 #define SPI_CS_DROP		0x04
125 
126 #define DMA_COMPLETION_TIMEOUT	msecs_to_jiffies(3000)
127 
/* Per-chip-select state, allocated on first dspi_setup() call. */
struct chip_data {
	u32 ctar_val;		/* cached CTAR image: mode bits, baud and delay scalers */
	u16 void_write_data;	/* filler word for RX-only transfers — set but not consumed in this file; TODO confirm intent */
};
132 
/* Transfer strategies; one is selected per SoC via devtype data. */
enum dspi_trans_mode {
	DSPI_EOQ_MODE = 0,	/* fill the FIFO, End-Of-Queue interrupt per burst */
	DSPI_TCFQ_MODE,		/* one word at a time, Transfer-Complete interrupt */
	DSPI_DMA_MODE,		/* bounce-buffer DMA on the PUSHR/POPR registers */
};
138 
/* Static per-SoC capabilities, matched through the OF table below. */
struct fsl_dspi_devtype_data {
	enum dspi_trans_mode trans_mode;	/* how transfers are driven */
	u8 max_clock_factor;	/* max SCK = protocol clock / this factor */
	bool xspi_mode;		/* extended SPI: frames wider than 16 bits */
};
144 
/* Vybrid VF610: DMA transfers, SCK capped at half the protocol clock. */
static const struct fsl_dspi_devtype_data vf610_data = {
	.trans_mode = DSPI_DMA_MODE,
	.max_clock_factor = 2,
};

/* LS1021A v1.0: TCFQ transfers with XSPI (up to 32-bit frames). */
static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
	.trans_mode = DSPI_TCFQ_MODE,
	.max_clock_factor = 8,
	.xspi_mode = true,
};

/* LS2085A: TCFQ transfers, no XSPI. */
static const struct fsl_dspi_devtype_data ls2085a_data = {
	.trans_mode = DSPI_TCFQ_MODE,
	.max_clock_factor = 8,
};

/* ColdFire (probed via platform data): EOQ transfers. */
static const struct fsl_dspi_devtype_data coldfire_data = {
	.trans_mode = DSPI_EOQ_MODE,
	.max_clock_factor = 8,
};
165 
/* DMA-mode state: a pair of coherent bounce buffers, one per direction. */
struct fsl_dspi_dma {
	/* Length of transfer in words of DSPI_FIFO_SIZE */
	u32 curr_xfer_len;

	u32 *tx_dma_buf;	/* CPU address of the TX bounce buffer */
	struct dma_chan *chan_tx;
	dma_addr_t tx_dma_phys;	/* device address of the TX bounce buffer */
	struct completion cmd_tx_complete;
	struct dma_async_tx_descriptor *tx_desc;

	u32 *rx_dma_buf;	/* CPU address of the RX bounce buffer */
	struct dma_chan *chan_rx;
	dma_addr_t rx_dma_phys;	/* device address of the RX bounce buffer */
	struct completion cmd_rx_complete;
	struct dma_async_tx_descriptor *rx_desc;
};
182 
/* Driver-private state, one instance per DSPI controller. */
struct fsl_dspi {
	struct spi_master	*master;	/* SPI core handle */
	struct platform_device	*pdev;		/* backing platform device */

	struct regmap		*regmap;	/* 32-bit map of the register block */
	struct regmap		*regmap_pushr;	/* 16-bit sub-map of PUSHR (XSPI only) */
	int			irq;
	struct clk		*clk;		/* "dspi" protocol clock */

	struct spi_transfer	*cur_transfer;	/* transfer currently on the wire */
	struct spi_message	*cur_msg;	/* message being processed */
	struct chip_data	*cur_chip;	/* per-CS settings of cur_msg's device */
	size_t			len;		/* bytes left in cur_transfer */
	const void		*tx;		/* next TX byte; NULL for dummy writes */
	void			*rx;		/* next RX byte; NULL to discard */
	void			*rx_end;	/* one past the last RX byte */
	u16			void_write_data;
	u16			tx_cmd;		/* CMD-half template for PUSHR writes */
	u8			bits_per_word;	/* frame size of cur_transfer */
	u8			bytes_per_word;	/* 1, 2 or 4, derived from bits_per_word */
	const struct fsl_dspi_devtype_data *devtype_data;

	/* IRQ -> transfer_one_message completion handshake */
	wait_queue_head_t	waitq;
	u32			waitflags;

	struct fsl_dspi_dma	*dma;		/* NULL unless DSPI_DMA_MODE */
};
210 
211 static u32 dspi_pop_tx(struct fsl_dspi *dspi)
212 {
213 	u32 txdata = 0;
214 
215 	if (dspi->tx) {
216 		if (dspi->bytes_per_word == 1)
217 			txdata = *(u8 *)dspi->tx;
218 		else if (dspi->bytes_per_word == 2)
219 			txdata = *(u16 *)dspi->tx;
220 		else  /* dspi->bytes_per_word == 4 */
221 			txdata = *(u32 *)dspi->tx;
222 		dspi->tx += dspi->bytes_per_word;
223 	}
224 	dspi->len -= dspi->bytes_per_word;
225 	return txdata;
226 }
227 
228 static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
229 {
230 	u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
231 
232 	if (dspi->len > 0)
233 		cmd |= SPI_PUSHR_CMD_CONT;
234 	return cmd << 16 | data;
235 }
236 
237 static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
238 {
239 	if (!dspi->rx)
240 		return;
241 
242 	/* Mask of undefined bits */
243 	rxdata &= (1 << dspi->bits_per_word) - 1;
244 
245 	if (dspi->bytes_per_word == 1)
246 		*(u8 *)dspi->rx = rxdata;
247 	else if (dspi->bytes_per_word == 2)
248 		*(u16 *)dspi->rx = rxdata;
249 	else /* dspi->bytes_per_word == 4 */
250 		*(u32 *)dspi->rx = rxdata;
251 	dspi->rx += dspi->bytes_per_word;
252 }
253 
254 static void dspi_tx_dma_callback(void *arg)
255 {
256 	struct fsl_dspi *dspi = arg;
257 	struct fsl_dspi_dma *dma = dspi->dma;
258 
259 	complete(&dma->cmd_tx_complete);
260 }
261 
262 static void dspi_rx_dma_callback(void *arg)
263 {
264 	struct fsl_dspi *dspi = arg;
265 	struct fsl_dspi_dma *dma = dspi->dma;
266 	int i;
267 
268 	if (dspi->rx) {
269 		for (i = 0; i < dma->curr_xfer_len; i++)
270 			dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
271 	}
272 
273 	complete(&dma->cmd_rx_complete);
274 }
275 
/* Run one DMA chunk of up to dma->curr_xfer_len PUSHR words: fill the
 * TX bounce buffer, prep and submit both descriptors, issue them, and
 * block until both directions complete (or time out).  Returns 0 on
 * success, -EIO/-EINVAL on descriptor errors, -ETIMEDOUT on timeout.
 * Must only be called from sleepable context.
 */
static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
{
	struct fsl_dspi_dma *dma = dspi->dma;
	struct device *dev = &dspi->pdev->dev;
	int time_left;
	int i;

	/* Each bounce-buffer entry is a complete CMD+data PUSHR word */
	for (i = 0; i < dma->curr_xfer_len; i++)
		dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);

	dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
					dma->tx_dma_phys,
					dma->curr_xfer_len *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->tx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->tx_desc->callback = dspi_tx_dma_callback;
	dma->tx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
					dma->rx_dma_phys,
					dma->curr_xfer_len *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->rx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->rx_desc->callback = dspi_rx_dma_callback;
	dma->rx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	/* Reset completions before issuing so callbacks cannot race */
	reinit_completion(&dspi->dma->cmd_rx_complete);
	reinit_completion(&dspi->dma->cmd_tx_complete);

	dma_async_issue_pending(dma->chan_rx);
	dma_async_issue_pending(dma->chan_tx);

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
					DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA tx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
					DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA rx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	return 0;
}
348 
349 static int dspi_dma_xfer(struct fsl_dspi *dspi)
350 {
351 	struct fsl_dspi_dma *dma = dspi->dma;
352 	struct device *dev = &dspi->pdev->dev;
353 	struct spi_message *message = dspi->cur_msg;
354 	int curr_remaining_bytes;
355 	int bytes_per_buffer;
356 	int ret = 0;
357 
358 	curr_remaining_bytes = dspi->len;
359 	bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
360 	while (curr_remaining_bytes) {
361 		/* Check if current transfer fits the DMA buffer */
362 		dma->curr_xfer_len = curr_remaining_bytes
363 			/ dspi->bytes_per_word;
364 		if (dma->curr_xfer_len > bytes_per_buffer)
365 			dma->curr_xfer_len = bytes_per_buffer;
366 
367 		ret = dspi_next_xfer_dma_submit(dspi);
368 		if (ret) {
369 			dev_err(dev, "DMA transfer failed\n");
370 			goto exit;
371 
372 		} else {
373 			const int len =
374 				dma->curr_xfer_len * dspi->bytes_per_word;
375 			curr_remaining_bytes -= len;
376 			message->actual_length += len;
377 			if (curr_remaining_bytes < 0)
378 				curr_remaining_bytes = 0;
379 		}
380 	}
381 
382 exit:
383 	return ret;
384 }
385 
386 static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
387 {
388 	struct fsl_dspi_dma *dma;
389 	struct dma_slave_config cfg;
390 	struct device *dev = &dspi->pdev->dev;
391 	int ret;
392 
393 	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
394 	if (!dma)
395 		return -ENOMEM;
396 
397 	dma->chan_rx = dma_request_slave_channel(dev, "rx");
398 	if (!dma->chan_rx) {
399 		dev_err(dev, "rx dma channel not available\n");
400 		ret = -ENODEV;
401 		return ret;
402 	}
403 
404 	dma->chan_tx = dma_request_slave_channel(dev, "tx");
405 	if (!dma->chan_tx) {
406 		dev_err(dev, "tx dma channel not available\n");
407 		ret = -ENODEV;
408 		goto err_tx_channel;
409 	}
410 
411 	dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
412 					&dma->tx_dma_phys, GFP_KERNEL);
413 	if (!dma->tx_dma_buf) {
414 		ret = -ENOMEM;
415 		goto err_tx_dma_buf;
416 	}
417 
418 	dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
419 					&dma->rx_dma_phys, GFP_KERNEL);
420 	if (!dma->rx_dma_buf) {
421 		ret = -ENOMEM;
422 		goto err_rx_dma_buf;
423 	}
424 
425 	cfg.src_addr = phy_addr + SPI_POPR;
426 	cfg.dst_addr = phy_addr + SPI_PUSHR;
427 	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
428 	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
429 	cfg.src_maxburst = 1;
430 	cfg.dst_maxburst = 1;
431 
432 	cfg.direction = DMA_DEV_TO_MEM;
433 	ret = dmaengine_slave_config(dma->chan_rx, &cfg);
434 	if (ret) {
435 		dev_err(dev, "can't configure rx dma channel\n");
436 		ret = -EINVAL;
437 		goto err_slave_config;
438 	}
439 
440 	cfg.direction = DMA_MEM_TO_DEV;
441 	ret = dmaengine_slave_config(dma->chan_tx, &cfg);
442 	if (ret) {
443 		dev_err(dev, "can't configure tx dma channel\n");
444 		ret = -EINVAL;
445 		goto err_slave_config;
446 	}
447 
448 	dspi->dma = dma;
449 	init_completion(&dma->cmd_tx_complete);
450 	init_completion(&dma->cmd_rx_complete);
451 
452 	return 0;
453 
454 err_slave_config:
455 	dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
456 			dma->rx_dma_buf, dma->rx_dma_phys);
457 err_rx_dma_buf:
458 	dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
459 			dma->tx_dma_buf, dma->tx_dma_phys);
460 err_tx_dma_buf:
461 	dma_release_channel(dma->chan_tx);
462 err_tx_channel:
463 	dma_release_channel(dma->chan_rx);
464 
465 	devm_kfree(dev, dma);
466 	dspi->dma = NULL;
467 
468 	return ret;
469 }
470 
471 static void dspi_release_dma(struct fsl_dspi *dspi)
472 {
473 	struct fsl_dspi_dma *dma = dspi->dma;
474 	struct device *dev = &dspi->pdev->dev;
475 
476 	if (dma) {
477 		if (dma->chan_tx) {
478 			dma_unmap_single(dev, dma->tx_dma_phys,
479 					DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
480 			dma_release_channel(dma->chan_tx);
481 		}
482 
483 		if (dma->chan_rx) {
484 			dma_unmap_single(dev, dma->rx_dma_phys,
485 					DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
486 			dma_release_channel(dma->chan_rx);
487 		}
488 	}
489 }
490 
491 static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
492 		unsigned long clkrate)
493 {
494 	/* Valid baud rate pre-scaler values */
495 	int pbr_tbl[4] = {2, 3, 5, 7};
496 	int brs[16] = {	2,	4,	6,	8,
497 		16,	32,	64,	128,
498 		256,	512,	1024,	2048,
499 		4096,	8192,	16384,	32768 };
500 	int scale_needed, scale, minscale = INT_MAX;
501 	int i, j;
502 
503 	scale_needed = clkrate / speed_hz;
504 	if (clkrate % speed_hz)
505 		scale_needed++;
506 
507 	for (i = 0; i < ARRAY_SIZE(brs); i++)
508 		for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) {
509 			scale = brs[i] * pbr_tbl[j];
510 			if (scale >= scale_needed) {
511 				if (scale < minscale) {
512 					minscale = scale;
513 					*br = i;
514 					*pbr = j;
515 				}
516 				break;
517 			}
518 		}
519 
520 	if (minscale == INT_MAX) {
521 		pr_warn("Can not find valid baud rate,speed_hz is %d,clkrate is %ld, we use the max prescaler value.\n",
522 			speed_hz, clkrate);
523 		*pbr = ARRAY_SIZE(pbr_tbl) - 1;
524 		*br =  ARRAY_SIZE(brs) - 1;
525 	}
526 }
527 
528 static void ns_delay_scale(char *psc, char *sc, int delay_ns,
529 		unsigned long clkrate)
530 {
531 	int pscale_tbl[4] = {1, 3, 5, 7};
532 	int scale_needed, scale, minscale = INT_MAX;
533 	int i, j;
534 	u32 remainder;
535 
536 	scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
537 			&remainder);
538 	if (remainder)
539 		scale_needed++;
540 
541 	for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++)
542 		for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) {
543 			scale = pscale_tbl[i] * (2 << j);
544 			if (scale >= scale_needed) {
545 				if (scale < minscale) {
546 					minscale = scale;
547 					*psc = i;
548 					*sc = j;
549 				}
550 				break;
551 			}
552 		}
553 
554 	if (minscale == INT_MAX) {
555 		pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value",
556 			delay_ns, clkrate);
557 		*psc = ARRAY_SIZE(pscale_tbl) - 1;
558 		*sc = SPI_CTAR_SCALE_BITS;
559 	}
560 }
561 
562 static void fifo_write(struct fsl_dspi *dspi)
563 {
564 	regmap_write(dspi->regmap, SPI_PUSHR, dspi_pop_tx_pushr(dspi));
565 }
566 
567 static void cmd_fifo_write(struct fsl_dspi *dspi)
568 {
569 	u16 cmd = dspi->tx_cmd;
570 
571 	if (dspi->len > 0)
572 		cmd |= SPI_PUSHR_CMD_CONT;
573 	regmap_write(dspi->regmap_pushr, PUSHR_CMD, cmd);
574 }
575 
/* Write one 16-bit data word into the TX FIFO via the PUSHR sub-map
 * (XSPI mode only).
 */
static void tx_fifo_write(struct fsl_dspi *dspi, u16 txdata)
{
	regmap_write(dspi->regmap_pushr, PUSHR_TX, txdata);
}
580 
581 static void dspi_tcfq_write(struct fsl_dspi *dspi)
582 {
583 	/* Clear transfer count */
584 	dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
585 
586 	if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
587 		/* Write two TX FIFO entries first, and then the corresponding
588 		 * CMD FIFO entry.
589 		 */
590 		u32 data = dspi_pop_tx(dspi);
591 
592 		if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE(1)) {
593 			/* LSB */
594 			tx_fifo_write(dspi, data & 0xFFFF);
595 			tx_fifo_write(dspi, data >> 16);
596 		} else {
597 			/* MSB */
598 			tx_fifo_write(dspi, data >> 16);
599 			tx_fifo_write(dspi, data & 0xFFFF);
600 		}
601 		cmd_fifo_write(dspi);
602 	} else {
603 		/* Write one entry to both TX FIFO and CMD FIFO
604 		 * simultaneously.
605 		 */
606 		fifo_write(dspi);
607 	}
608 }
609 
610 static u32 fifo_read(struct fsl_dspi *dspi)
611 {
612 	u32 rxdata = 0;
613 
614 	regmap_read(dspi->regmap, SPI_POPR, &rxdata);
615 	return rxdata;
616 }
617 
/* Collect the single word produced by a TCFQ-mode transfer. */
static void dspi_tcfq_read(struct fsl_dspi *dspi)
{
	dspi_push_rx(dspi, fifo_read(dspi));
}
622 
/* Queue one EOQ-mode burst of up to DSPI_FIFO_SIZE words.  The first
 * entry clears the hardware transfer counter; the last entry (end of
 * data or full FIFO) requests the End-Of-Queue interrupt.
 */
static void dspi_eoq_write(struct fsl_dspi *dspi)
{
	int fifo_size = DSPI_FIFO_SIZE;

	/* Fill TX FIFO with as many transfers as possible */
	while (dspi->len && fifo_size--) {
		/* Request EOQF for last transfer in FIFO */
		if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
			dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
		/* Clear transfer count for first transfer in FIFO */
		if (fifo_size == (DSPI_FIFO_SIZE - 1))
			dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
		/* Write combined TX FIFO and CMD FIFO entry */
		fifo_write(dspi);
	}
}
639 
640 static void dspi_eoq_read(struct fsl_dspi *dspi)
641 {
642 	int fifo_size = DSPI_FIFO_SIZE;
643 
644 	/* Read one FIFO entry at and push to rx buffer */
645 	while ((dspi->rx < dspi->rx_end) && fifo_size--)
646 		dspi_push_rx(dspi, fifo_read(dspi));
647 }
648 
/* Message pump: walk every transfer in @message, program CTAR(0) for
 * its word size, start it in the SoC-specific mode and wait for
 * completion (interrupt handshake for EOQ/TCFQ, synchronous for DMA).
 * The result is stored in message->status and also returned.
 */
static int dspi_transfer_one_message(struct spi_master *master,
		struct spi_message *message)
{
	struct fsl_dspi *dspi = spi_master_get_devdata(master);
	struct spi_device *spi = message->spi;
	struct spi_transfer *transfer;
	int status = 0;
	enum dspi_trans_mode trans_mode;

	message->actual_length = 0;

	list_for_each_entry(transfer, &message->transfers, transfer_list) {
		dspi->cur_transfer = transfer;
		dspi->cur_msg = message;
		dspi->cur_chip = spi_get_ctldata(spi);
		/* Prepare command word for CMD FIFO */
		dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0) |
			SPI_PUSHR_CMD_PCS(spi->chip_select);
		if (list_is_last(&dspi->cur_transfer->transfer_list,
				 &dspi->cur_msg->transfers)) {
			/* Leave PCS activated after last transfer when
			 * cs_change is set.
			 */
			if (transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		} else {
			/* Keep PCS active between transfers in same message
			 * when cs_change is not set, and de-activate PCS
			 * between transfers in the same message when
			 * cs_change is set.
			 */
			if (!transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		}

		dspi->void_write_data = dspi->cur_chip->void_write_data;

		dspi->tx = transfer->tx_buf;
		dspi->rx = transfer->rx_buf;
		dspi->rx_end = dspi->rx + transfer->len;
		dspi->len = transfer->len;
		/* Validated transfer specific frame size (defaults applied) */
		dspi->bits_per_word = transfer->bits_per_word;
		if (transfer->bits_per_word <= 8)
			dspi->bytes_per_word = 1;
		else if (transfer->bits_per_word <= 16)
			dspi->bytes_per_word = 2;
		else
			dspi->bytes_per_word = 4;

		/* Flush both FIFOs before reprogramming the transfer */
		regmap_update_bits(dspi->regmap, SPI_MCR,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
		regmap_write(dspi->regmap, SPI_CTAR(0),
			     dspi->cur_chip->ctar_val |
			     SPI_FRAME_BITS(transfer->bits_per_word));
		if (dspi->devtype_data->xspi_mode)
			regmap_write(dspi->regmap, SPI_CTARE(0),
				     SPI_FRAME_EBITS(transfer->bits_per_word)
				     | SPI_CTARE_DTCP(1));

		/* Enable the matching interrupt/DMA requests and kick
		 * off the first chunk.
		 */
		trans_mode = dspi->devtype_data->trans_mode;
		switch (trans_mode) {
		case DSPI_EOQ_MODE:
			regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
			dspi_eoq_write(dspi);
			break;
		case DSPI_TCFQ_MODE:
			regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
			dspi_tcfq_write(dspi);
			break;
		case DSPI_DMA_MODE:
			regmap_write(dspi->regmap, SPI_RSER,
				SPI_RSER_TFFFE | SPI_RSER_TFFFD |
				SPI_RSER_RFDFE | SPI_RSER_RFDFD);
			status = dspi_dma_xfer(dspi);
			break;
		default:
			dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
				trans_mode);
			status = -EINVAL;
			goto out;
		}

		/* EOQ/TCFQ completion is signalled by dspi_interrupt() */
		if (trans_mode != DSPI_DMA_MODE) {
			if (wait_event_interruptible(dspi->waitq,
						dspi->waitflags))
				dev_err(&dspi->pdev->dev,
					"wait transfer complete fail!\n");
			dspi->waitflags = 0;
		}

		if (transfer->delay_usecs)
			udelay(transfer->delay_usecs);
	}

out:
	message->status = status;
	spi_finalize_current_message(master);

	return status;
}
751 
/* Per-device setup: translate spi->mode, spi->max_speed_hz and the
 * CS timing properties (device tree or platform data) into a cached
 * CTAR image stored as controller data.  Called by the SPI core and
 * may run more than once for a device; chip data is allocated only on
 * the first call.
 * NOTE(review): hz_to_spi_baud() divides by spi->max_speed_hz —
 * assumes the core never passes 0 here; confirm against callers.
 */
static int dspi_setup(struct spi_device *spi)
{
	struct chip_data *chip;
	struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
	struct fsl_dspi_platform_data *pdata;
	u32 cs_sck_delay = 0, sck_cs_delay = 0;
	unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
	unsigned char pasc = 0, asc = 0;
	unsigned long clkrate;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
	}

	/* Platform data wins over device-tree delay properties */
	pdata = dev_get_platdata(&dspi->pdev->dev);

	if (!pdata) {
		of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
				&cs_sck_delay);

		of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
				&sck_cs_delay);
	} else {
		cs_sck_delay = pdata->cs_sck_delay;
		sck_cs_delay = pdata->sck_cs_delay;
	}

	chip->void_write_data = 0;

	clkrate = clk_get_rate(dspi->clk);
	hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);

	/* Set PCS to SCK delay scale values */
	ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate);

	/* Set After SCK delay scale values */
	ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);

	chip->ctar_val = SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
		| SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0)
		| SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0)
		| SPI_CTAR_PCSSCK(pcssck)
		| SPI_CTAR_CSSCK(cssck)
		| SPI_CTAR_PASC(pasc)
		| SPI_CTAR_ASC(asc)
		| SPI_CTAR_PBR(pbr)
		| SPI_CTAR_BR(br);

	spi_set_ctldata(spi, chip);

	return 0;
}
808 
809 static void dspi_cleanup(struct spi_device *spi)
810 {
811 	struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
812 
813 	dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
814 			spi->master->bus_num, spi->chip_select);
815 
816 	kfree(chip);
817 }
818 
/* IRQ handler for EOQ/TCFQ modes: acknowledge status, account the
 * completed words, drain the RX side, then either refill the FIFO for
 * the remainder of the transfer or wake dspi_transfer_one_message().
 */
static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
	struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
	struct spi_message *msg = dspi->cur_msg;
	enum dspi_trans_mode trans_mode;
	u32 spi_sr, spi_tcr;
	u16 spi_tcnt;

	/* SR is write-1-to-clear: ack everything we just read */
	regmap_read(dspi->regmap, SPI_SR, &spi_sr);
	regmap_write(dspi->regmap, SPI_SR, spi_sr);


	if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)) {
		/* Get transfer counter (in number of SPI transfers). It was
		 * reset to 0 when transfer(s) were started.
		 */
		regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
		spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
		/* Update total number of bytes that were transferred */
		msg->actual_length += spi_tcnt * dspi->bytes_per_word;

		trans_mode = dspi->devtype_data->trans_mode;
		switch (trans_mode) {
		case DSPI_EOQ_MODE:
			dspi_eoq_read(dspi);
			break;
		case DSPI_TCFQ_MODE:
			dspi_tcfq_read(dspi);
			break;
		default:
			dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
				trans_mode);
				return IRQ_HANDLED;
		}

		if (!dspi->len) {
			/* Transfer done: wake the waiting message pump */
			dspi->waitflags = 1;
			wake_up_interruptible(&dspi->waitq);
		} else {
			/* More data pending: queue the next chunk */
			switch (trans_mode) {
			case DSPI_EOQ_MODE:
				dspi_eoq_write(dspi);
				break;
			case DSPI_TCFQ_MODE:
				dspi_tcfq_write(dspi);
				break;
			default:
				dev_err(&dspi->pdev->dev,
					"unsupported trans_mode %u\n",
					trans_mode);
			}
		}
	}

	return IRQ_HANDLED;
}
875 
/* Device-tree match table; .data selects the per-SoC devtype data. */
static const struct of_device_id fsl_dspi_dt_ids[] = {
	{ .compatible = "fsl,vf610-dspi", .data = &vf610_data, },
	{ .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, },
	{ .compatible = "fsl,ls2085a-dspi", .data = &ls2085a_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
883 
884 #ifdef CONFIG_PM_SLEEP
885 static int dspi_suspend(struct device *dev)
886 {
887 	struct spi_master *master = dev_get_drvdata(dev);
888 	struct fsl_dspi *dspi = spi_master_get_devdata(master);
889 
890 	spi_master_suspend(master);
891 	clk_disable_unprepare(dspi->clk);
892 
893 	pinctrl_pm_select_sleep_state(dev);
894 
895 	return 0;
896 }
897 
/* System-sleep resume: restore pins and clock before letting the SPI
 * core queue messages again.
 */
static int dspi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_dspi *dspi = spi_master_get_devdata(master);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		return ret;
	spi_master_resume(master);

	return 0;
}
913 #endif /* CONFIG_PM_SLEEP */
914 
915 static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
916 
/* Registers the hardware updates itself — must bypass the regmap cache. */
static const struct regmap_range dspi_volatile_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_TCR),
	regmap_reg_range(SPI_SR, SPI_SR),
	regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
};

static const struct regmap_access_table dspi_volatile_table = {
	.yes_ranges     = dspi_volatile_ranges,
	.n_yes_ranges   = ARRAY_SIZE(dspi_volatile_ranges),
};

/* 32-bit MMIO map covering SPI_MCR..SPI_RXFR3 (non-XSPI parts). */
static const struct regmap_config dspi_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x88,
	.volatile_table = &dspi_volatile_table,
};
935 
/* XSPI volatile set: same as above plus the extended status register. */
static const struct regmap_range dspi_xspi_volatile_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_TCR),
	regmap_reg_range(SPI_SR, SPI_SR),
	regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
	regmap_reg_range(SPI_SREX, SPI_SREX),
};

static const struct regmap_access_table dspi_xspi_volatile_table = {
	.yes_ranges     = dspi_xspi_volatile_ranges,
	.n_yes_ranges   = ARRAY_SIZE(dspi_xspi_volatile_ranges),
};

/* [0]: full 32-bit map up to SPI_SREX; [1]: 16-bit sub-map of PUSHR so
 * the CMD and TX halves can be written independently in XSPI mode.
 */
static const struct regmap_config dspi_xspi_regmap_config[] = {
	{
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
		.max_register = 0x13c,
		.volatile_table = &dspi_xspi_volatile_table,
	},
	{
		.name = "pushr",
		.reg_bits = 16,
		.val_bits = 16,
		.reg_stride = 2,
		.max_register = 0x2,
	},
};
964 
/* Bring the controller into a known state: master mode, PCSIS bits set
 * (chip-select inactive state), XSPI enabled when available, and all
 * latched status flags cleared.
 */
static void dspi_init(struct fsl_dspi *dspi)
{
	regmap_write(dspi->regmap, SPI_MCR, SPI_MCR_MASTER | SPI_MCR_PCSIS |
		     (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0));
	/* SR is write-1-to-clear */
	regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
	if (dspi->devtype_data->xspi_mode)
		regmap_write(dspi->regmap, SPI_CTARE(0),
			     SPI_CTARE_FMSZE(0) | SPI_CTARE_DTCP(1));
}
974 
975 static int dspi_probe(struct platform_device *pdev)
976 {
977 	struct device_node *np = pdev->dev.of_node;
978 	struct spi_master *master;
979 	struct fsl_dspi *dspi;
980 	struct resource *res;
981 	const struct regmap_config *regmap_config;
982 	void __iomem *base;
983 	struct fsl_dspi_platform_data *pdata;
984 	int ret = 0, cs_num, bus_num;
985 
986 	master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
987 	if (!master)
988 		return -ENOMEM;
989 
990 	dspi = spi_master_get_devdata(master);
991 	dspi->pdev = pdev;
992 	dspi->master = master;
993 
994 	master->transfer = NULL;
995 	master->setup = dspi_setup;
996 	master->transfer_one_message = dspi_transfer_one_message;
997 	master->dev.of_node = pdev->dev.of_node;
998 
999 	master->cleanup = dspi_cleanup;
1000 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
1001 
1002 	pdata = dev_get_platdata(&pdev->dev);
1003 	if (pdata) {
1004 		master->num_chipselect = pdata->cs_num;
1005 		master->bus_num = pdata->bus_num;
1006 
1007 		dspi->devtype_data = &coldfire_data;
1008 	} else {
1009 
1010 		ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
1011 		if (ret < 0) {
1012 			dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
1013 			goto out_master_put;
1014 		}
1015 		master->num_chipselect = cs_num;
1016 
1017 		ret = of_property_read_u32(np, "bus-num", &bus_num);
1018 		if (ret < 0) {
1019 			dev_err(&pdev->dev, "can't get bus-num\n");
1020 			goto out_master_put;
1021 		}
1022 		master->bus_num = bus_num;
1023 
1024 		dspi->devtype_data = of_device_get_match_data(&pdev->dev);
1025 		if (!dspi->devtype_data) {
1026 			dev_err(&pdev->dev, "can't get devtype_data\n");
1027 			ret = -EFAULT;
1028 			goto out_master_put;
1029 		}
1030 	}
1031 
1032 	if (dspi->devtype_data->xspi_mode)
1033 		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1034 	else
1035 		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
1036 
1037 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1038 	base = devm_ioremap_resource(&pdev->dev, res);
1039 	if (IS_ERR(base)) {
1040 		ret = PTR_ERR(base);
1041 		goto out_master_put;
1042 	}
1043 
1044 	if (dspi->devtype_data->xspi_mode)
1045 		regmap_config = &dspi_xspi_regmap_config[0];
1046 	else
1047 		regmap_config = &dspi_regmap_config;
1048 	dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base, regmap_config);
1049 	if (IS_ERR(dspi->regmap)) {
1050 		dev_err(&pdev->dev, "failed to init regmap: %ld\n",
1051 				PTR_ERR(dspi->regmap));
1052 		ret = PTR_ERR(dspi->regmap);
1053 		goto out_master_put;
1054 	}
1055 
1056 	if (dspi->devtype_data->xspi_mode) {
1057 		dspi->regmap_pushr = devm_regmap_init_mmio(
1058 			&pdev->dev, base + SPI_PUSHR,
1059 			&dspi_xspi_regmap_config[1]);
1060 		if (IS_ERR(dspi->regmap_pushr)) {
1061 			dev_err(&pdev->dev,
1062 				"failed to init pushr regmap: %ld\n",
1063 				PTR_ERR(dspi->regmap_pushr));
1064 			ret = PTR_ERR(dspi->regmap_pushr);
1065 			goto out_master_put;
1066 		}
1067 	}
1068 
1069 	dspi->clk = devm_clk_get(&pdev->dev, "dspi");
1070 	if (IS_ERR(dspi->clk)) {
1071 		ret = PTR_ERR(dspi->clk);
1072 		dev_err(&pdev->dev, "unable to get clock\n");
1073 		goto out_master_put;
1074 	}
1075 	ret = clk_prepare_enable(dspi->clk);
1076 	if (ret)
1077 		goto out_master_put;
1078 
1079 	dspi_init(dspi);
1080 	dspi->irq = platform_get_irq(pdev, 0);
1081 	if (dspi->irq < 0) {
1082 		dev_err(&pdev->dev, "can't get platform irq\n");
1083 		ret = dspi->irq;
1084 		goto out_clk_put;
1085 	}
1086 
1087 	ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
1088 			pdev->name, dspi);
1089 	if (ret < 0) {
1090 		dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
1091 		goto out_clk_put;
1092 	}
1093 
1094 	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
1095 		ret = dspi_request_dma(dspi, res->start);
1096 		if (ret < 0) {
1097 			dev_err(&pdev->dev, "can't get dma channels\n");
1098 			goto out_clk_put;
1099 		}
1100 	}
1101 
1102 	master->max_speed_hz =
1103 		clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
1104 
1105 	init_waitqueue_head(&dspi->waitq);
1106 	platform_set_drvdata(pdev, master);
1107 
1108 	ret = spi_register_master(master);
1109 	if (ret != 0) {
1110 		dev_err(&pdev->dev, "Problem registering DSPI master\n");
1111 		goto out_clk_put;
1112 	}
1113 
1114 	return ret;
1115 
1116 out_clk_put:
1117 	clk_disable_unprepare(dspi->clk);
1118 out_master_put:
1119 	spi_master_put(master);
1120 
1121 	return ret;
1122 }
1123 
1124 static int dspi_remove(struct platform_device *pdev)
1125 {
1126 	struct spi_master *master = platform_get_drvdata(pdev);
1127 	struct fsl_dspi *dspi = spi_master_get_devdata(master);
1128 
1129 	/* Disconnect from the SPI framework */
1130 	dspi_release_dma(dspi);
1131 	clk_disable_unprepare(dspi->clk);
1132 	spi_unregister_master(dspi->master);
1133 
1134 	return 0;
1135 }
1136 
/* Platform driver glue: OF match table, PM ops, probe/remove. */
static struct platform_driver fsl_dspi_driver = {
	.driver.name    = DRIVER_NAME,
	.driver.of_match_table = fsl_dspi_dt_ids,
	.driver.owner   = THIS_MODULE,
	.driver.pm = &dspi_pm,
	.probe          = dspi_probe,
	.remove		= dspi_remove,
};
1145 module_platform_driver(fsl_dspi_driver);
1146 
1147 MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
1148 MODULE_LICENSE("GPL");
1149 MODULE_ALIAS("platform:" DRIVER_NAME);
1150