xref: /openbmc/linux/drivers/spi/spi-rspi.c (revision 34facb04)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * SH RSPI driver
4  *
5  * Copyright (C) 2012, 2013  Renesas Solutions Corp.
6  * Copyright (C) 2014 Glider bvba
7  *
8  * Based on spi-sh.c:
9  * Copyright (C) 2011 Renesas Solutions Corp.
10  */
11 
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/errno.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/io.h>
19 #include <linux/clk.h>
20 #include <linux/dmaengine.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/of_device.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/sh_dma.h>
25 #include <linux/spi/spi.h>
26 #include <linux/spi/rspi.h>
27 #include <linux/spinlock.h>
28 
29 #define RSPI_SPCR		0x00	/* Control Register */
30 #define RSPI_SSLP		0x01	/* Slave Select Polarity Register */
31 #define RSPI_SPPCR		0x02	/* Pin Control Register */
32 #define RSPI_SPSR		0x03	/* Status Register */
33 #define RSPI_SPDR		0x04	/* Data Register */
34 #define RSPI_SPSCR		0x08	/* Sequence Control Register */
35 #define RSPI_SPSSR		0x09	/* Sequence Status Register */
36 #define RSPI_SPBR		0x0a	/* Bit Rate Register */
37 #define RSPI_SPDCR		0x0b	/* Data Control Register */
38 #define RSPI_SPCKD		0x0c	/* Clock Delay Register */
39 #define RSPI_SSLND		0x0d	/* Slave Select Negation Delay Register */
40 #define RSPI_SPND		0x0e	/* Next-Access Delay Register */
41 #define RSPI_SPCR2		0x0f	/* Control Register 2 (SH only) */
42 #define RSPI_SPCMD0		0x10	/* Command Register 0 */
43 #define RSPI_SPCMD1		0x12	/* Command Register 1 */
44 #define RSPI_SPCMD2		0x14	/* Command Register 2 */
45 #define RSPI_SPCMD3		0x16	/* Command Register 3 */
46 #define RSPI_SPCMD4		0x18	/* Command Register 4 */
47 #define RSPI_SPCMD5		0x1a	/* Command Register 5 */
48 #define RSPI_SPCMD6		0x1c	/* Command Register 6 */
49 #define RSPI_SPCMD7		0x1e	/* Command Register 7 */
50 #define RSPI_SPCMD(i)		(RSPI_SPCMD0 + (i) * 2)
51 #define RSPI_NUM_SPCMD		8
52 #define RSPI_RZ_NUM_SPCMD	4
53 #define QSPI_NUM_SPCMD		4
54 
55 /* RSPI on RZ only */
56 #define RSPI_SPBFCR		0x20	/* Buffer Control Register */
57 #define RSPI_SPBFDR		0x22	/* Buffer Data Count Setting Register */
58 
59 /* QSPI only */
60 #define QSPI_SPBFCR		0x18	/* Buffer Control Register */
61 #define QSPI_SPBDCR		0x1a	/* Buffer Data Count Register */
62 #define QSPI_SPBMUL0		0x1c	/* Transfer Data Length Multiplier Setting Register 0 */
63 #define QSPI_SPBMUL1		0x20	/* Transfer Data Length Multiplier Setting Register 1 */
64 #define QSPI_SPBMUL2		0x24	/* Transfer Data Length Multiplier Setting Register 2 */
65 #define QSPI_SPBMUL3		0x28	/* Transfer Data Length Multiplier Setting Register 3 */
66 #define QSPI_SPBMUL(i)		(QSPI_SPBMUL0 + (i) * 4)
67 
68 /* SPCR - Control Register */
69 #define SPCR_SPRIE		0x80	/* Receive Interrupt Enable */
70 #define SPCR_SPE		0x40	/* Function Enable */
71 #define SPCR_SPTIE		0x20	/* Transmit Interrupt Enable */
72 #define SPCR_SPEIE		0x10	/* Error Interrupt Enable */
73 #define SPCR_MSTR		0x08	/* Master/Slave Mode Select */
74 #define SPCR_MODFEN		0x04	/* Mode Fault Error Detection Enable */
75 /* RSPI on SH only */
76 #define SPCR_TXMD		0x02	/* TX Only Mode (vs. Full Duplex) */
77 #define SPCR_SPMS		0x01	/* 3-wire Mode (vs. 4-wire) */
78 /* QSPI on R-Car Gen2 only */
79 #define SPCR_WSWAP		0x02	/* Word Swap of read-data for DMAC */
80 #define SPCR_BSWAP		0x01	/* Byte Swap of read-data for DMAC */
81 
82 /* SSLP - Slave Select Polarity Register */
83 #define SSLP_SSLP(i)		BIT(i)	/* SSLi Signal Polarity Setting */
84 
85 /* SPPCR - Pin Control Register */
86 #define SPPCR_MOIFE		0x20	/* MOSI Idle Value Fixing Enable */
87 #define SPPCR_MOIFV		0x10	/* MOSI Idle Fixed Value */
88 #define SPPCR_SPOM		0x04
89 #define SPPCR_SPLP2		0x02	/* Loopback Mode 2 (non-inverting) */
90 #define SPPCR_SPLP		0x01	/* Loopback Mode (inverting) */
91 
92 #define SPPCR_IO3FV		0x04	/* Single-/Dual-SPI Mode IO3 Output Fixed Value */
93 #define SPPCR_IO2FV		0x04	/* Single-/Dual-SPI Mode IO2 Output Fixed Value */
94 
95 /* SPSR - Status Register */
96 #define SPSR_SPRF		0x80	/* Receive Buffer Full Flag */
97 #define SPSR_TEND		0x40	/* Transmit End */
98 #define SPSR_SPTEF		0x20	/* Transmit Buffer Empty Flag */
99 #define SPSR_PERF		0x08	/* Parity Error Flag */
100 #define SPSR_MODF		0x04	/* Mode Fault Error Flag */
101 #define SPSR_IDLNF		0x02	/* RSPI Idle Flag */
102 #define SPSR_OVRF		0x01	/* Overrun Error Flag (RSPI only) */
103 
104 /* SPSCR - Sequence Control Register */
105 #define SPSCR_SPSLN_MASK	0x07	/* Sequence Length Specification */
106 
107 /* SPSSR - Sequence Status Register */
108 #define SPSSR_SPECM_MASK	0x70	/* Command Error Mask */
109 #define SPSSR_SPCP_MASK		0x07	/* Command Pointer Mask */
110 
111 /* SPDCR - Data Control Register */
112 #define SPDCR_TXDMY		0x80	/* Dummy Data Transmission Enable */
113 #define SPDCR_SPLW1		0x40	/* Access Width Specification (RZ) */
114 #define SPDCR_SPLW0		0x20	/* Access Width Specification (RZ) */
115 #define SPDCR_SPLLWORD		(SPDCR_SPLW1 | SPDCR_SPLW0)
116 #define SPDCR_SPLWORD		SPDCR_SPLW1
117 #define SPDCR_SPLBYTE		SPDCR_SPLW0
118 #define SPDCR_SPLW		0x20	/* Access Width Specification (SH) */
119 #define SPDCR_SPRDTD		0x10	/* Receive Transmit Data Select (SH) */
120 #define SPDCR_SLSEL1		0x08
121 #define SPDCR_SLSEL0		0x04
122 #define SPDCR_SLSEL_MASK	0x0c	/* SSL1 Output Select (SH) */
123 #define SPDCR_SPFC1		0x02
124 #define SPDCR_SPFC0		0x01
125 #define SPDCR_SPFC_MASK		0x03	/* Frame Count Setting (1-4) (SH) */
126 
127 /* SPCKD - Clock Delay Register */
128 #define SPCKD_SCKDL_MASK	0x07	/* Clock Delay Setting (1-8) */
129 
130 /* SSLND - Slave Select Negation Delay Register */
131 #define SSLND_SLNDL_MASK	0x07	/* SSL Negation Delay Setting (1-8) */
132 
133 /* SPND - Next-Access Delay Register */
134 #define SPND_SPNDL_MASK		0x07	/* Next-Access Delay Setting (1-8) */
135 
136 /* SPCR2 - Control Register 2 */
137 #define SPCR2_PTE		0x08	/* Parity Self-Test Enable */
138 #define SPCR2_SPIE		0x04	/* Idle Interrupt Enable */
139 #define SPCR2_SPOE		0x02	/* Odd Parity Enable (vs. Even) */
140 #define SPCR2_SPPE		0x01	/* Parity Enable */
141 
142 /* SPCMDn - Command Registers */
143 #define SPCMD_SCKDEN		0x8000	/* Clock Delay Setting Enable */
144 #define SPCMD_SLNDEN		0x4000	/* SSL Negation Delay Setting Enable */
145 #define SPCMD_SPNDEN		0x2000	/* Next-Access Delay Enable */
146 #define SPCMD_LSBF		0x1000	/* LSB First */
147 #define SPCMD_SPB_MASK		0x0f00	/* Data Length Setting */
148 #define SPCMD_SPB_8_TO_16(bit)	(((bit - 1) << 8) & SPCMD_SPB_MASK)
149 #define SPCMD_SPB_8BIT		0x0000	/* QSPI only */
150 #define SPCMD_SPB_16BIT		0x0100
151 #define SPCMD_SPB_20BIT		0x0000
152 #define SPCMD_SPB_24BIT		0x0100
153 #define SPCMD_SPB_32BIT		0x0200
154 #define SPCMD_SSLKP		0x0080	/* SSL Signal Level Keeping */
155 #define SPCMD_SPIMOD_MASK	0x0060	/* SPI Operating Mode (QSPI only) */
156 #define SPCMD_SPIMOD1		0x0040
157 #define SPCMD_SPIMOD0		0x0020
158 #define SPCMD_SPIMOD_SINGLE	0
159 #define SPCMD_SPIMOD_DUAL	SPCMD_SPIMOD0
160 #define SPCMD_SPIMOD_QUAD	SPCMD_SPIMOD1
161 #define SPCMD_SPRW		0x0010	/* SPI Read/Write Access (Dual/Quad) */
162 #define SPCMD_SSLA(i)		((i) << 4)	/* SSL Assert Signal Setting */
163 #define SPCMD_BRDV_MASK		0x000c	/* Bit Rate Division Setting */
164 #define SPCMD_CPOL		0x0002	/* Clock Polarity Setting */
165 #define SPCMD_CPHA		0x0001	/* Clock Phase Setting */
166 
167 /* SPBFCR - Buffer Control Register */
168 #define SPBFCR_TXRST		0x80	/* Transmit Buffer Data Reset */
169 #define SPBFCR_RXRST		0x40	/* Receive Buffer Data Reset */
170 #define SPBFCR_TXTRG_MASK	0x30	/* Transmit Buffer Data Triggering Number */
171 #define SPBFCR_RXTRG_MASK	0x07	/* Receive Buffer Data Triggering Number */
172 /* QSPI on R-Car Gen2 */
173 #define SPBFCR_TXTRG_1B		0x00	/* 31 bytes (1 byte available) */
174 #define SPBFCR_TXTRG_32B	0x30	/* 0 byte (32 bytes available) */
175 #define SPBFCR_RXTRG_1B		0x00	/* 1 byte (31 bytes available) */
176 #define SPBFCR_RXTRG_32B	0x07	/* 32 bytes (0 byte available) */
177 
178 #define QSPI_BUFFER_SIZE        32u
179 
/* Driver-private controller state */
struct rspi_data {
	void __iomem *addr;		/* base of memory-mapped registers */
	u32 max_speed_hz;		/* speed limit of current message's device */
	struct spi_controller *ctlr;
	struct platform_device *pdev;
	wait_queue_head_t wait;		/* woken by IRQ handlers / DMA completion */
	spinlock_t lock;		/* Protects RMW-access to RSPI_SSLP */
	struct clk *clk;
	u16 spcmd;			/* template value for SPCMDn command registers */
	u8 spsr;			/* last RSPI_SPSR snapshot taken in IRQ context */
	u8 sppcr;			/* pin control settings (loopback etc.) */
	int rx_irq, tx_irq;
	const struct spi_ops *ops;	/* variant-specific operations */

	unsigned dma_callbacked:1;	/* set by rspi_dma_complete() */
	unsigned byte_access:1;		/* 8-bit (vs. 16-bit) RSPI_SPDR access */
};
197 
198 static void rspi_write8(const struct rspi_data *rspi, u8 data, u16 offset)
199 {
200 	iowrite8(data, rspi->addr + offset);
201 }
202 
203 static void rspi_write16(const struct rspi_data *rspi, u16 data, u16 offset)
204 {
205 	iowrite16(data, rspi->addr + offset);
206 }
207 
208 static void rspi_write32(const struct rspi_data *rspi, u32 data, u16 offset)
209 {
210 	iowrite32(data, rspi->addr + offset);
211 }
212 
213 static u8 rspi_read8(const struct rspi_data *rspi, u16 offset)
214 {
215 	return ioread8(rspi->addr + offset);
216 }
217 
218 static u16 rspi_read16(const struct rspi_data *rspi, u16 offset)
219 {
220 	return ioread16(rspi->addr + offset);
221 }
222 
223 static void rspi_write_data(const struct rspi_data *rspi, u16 data)
224 {
225 	if (rspi->byte_access)
226 		rspi_write8(rspi, data, RSPI_SPDR);
227 	else /* 16 bit */
228 		rspi_write16(rspi, data, RSPI_SPDR);
229 }
230 
231 static u16 rspi_read_data(const struct rspi_data *rspi)
232 {
233 	if (rspi->byte_access)
234 		return rspi_read8(rspi, RSPI_SPDR);
235 	else /* 16 bit */
236 		return rspi_read16(rspi, RSPI_SPDR);
237 }
238 
/* Per-variant operations and parameters (RSPI on SH, RSPI on RZ, QSPI) */
struct spi_ops {
	/* Program pin control, bit rate, access width, delays, SPCMD0 */
	int (*set_config_register)(struct rspi_data *rspi, int access_size);
	/* Variant-specific single-transfer implementation */
	int (*transfer_one)(struct spi_controller *ctlr,
			    struct spi_device *spi, struct spi_transfer *xfer);
	u16 extra_mode_bits;	/* additional SPI mode bits the variant supports */
	u16 flags;
	u16 fifo_size;		/* transfers larger than this may use DMA */
	u8 num_hw_ss;		/* number of native chip selects */
};
249 
/*
 * functions for RSPI on legacy SH
 */

/*
 * Configure RSPI on SH for transfers of @access_size bits: pin control,
 * bit rate (clk / (2 * (SPBR + 1))), 16-bit SPDR access, zero delays,
 * parity off, sequencer reset, and master mode (SPCR_SPE is set later,
 * in rspi_prepare_message()).  Always returns 0.
 */
static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
{
	int spbr;

	/* Sets output mode, MOSI signal, and (optionally) loopback */
	rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);

	/* Sets transfer bit rate, clamped to the 8-bit SPBR range */
	spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
			    2 * rspi->max_speed_hz) - 1;
	rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);

	/* Disable dummy transmission, set 16-bit word access, 1 frame */
	rspi_write8(rspi, 0, RSPI_SPDCR);
	rspi->byte_access = 0;

	/* Sets RSPCK, SSL, next-access delay value */
	rspi_write8(rspi, 0x00, RSPI_SPCKD);
	rspi_write8(rspi, 0x00, RSPI_SSLND);
	rspi_write8(rspi, 0x00, RSPI_SPND);

	/* Sets parity, interrupt mask */
	rspi_write8(rspi, 0x00, RSPI_SPCR2);

	/* Resets sequencer and programs the data length into SPCMD0 */
	rspi_write8(rspi, 0, RSPI_SPSCR);
	rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
	rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);

	/* Sets RSPI mode */
	rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);

	return 0;
}
287 
/*
 * functions for RSPI on RZ
 */

/*
 * Configure RSPI on RZ for transfers of @access_size bits.  In addition
 * to the plain-SH setup, a clock pre-divider (0-3 halvings of the input
 * clock) is chosen and programmed into the SPCMD BRDV field (bits 3:2),
 * and byte access to SPDR is used.  Always returns 0.
 */
static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
{
	int spbr;
	int div = 0;
	unsigned long clksrc;

	/* Sets output mode, MOSI signal, and (optionally) loopback */
	rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);

	/* Halve the clock source (max 3 times) until SPBR can reach the rate */
	clksrc = clk_get_rate(rspi->clk);
	while (div < 3) {
		if (rspi->max_speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */
			break;
		div++;
		clksrc /= 2;
	}

	/* Sets transfer bit rate, clamped to the 8-bit SPBR range */
	spbr = DIV_ROUND_UP(clksrc, 2 * rspi->max_speed_hz) - 1;
	rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
	rspi->spcmd |= div << 2;	/* BRDV field, cf. SPCMD_BRDV_MASK */

	/* Disable dummy transmission, set byte access */
	rspi_write8(rspi, SPDCR_SPLBYTE, RSPI_SPDCR);
	rspi->byte_access = 1;

	/* Sets RSPCK, SSL, next-access delay value */
	rspi_write8(rspi, 0x00, RSPI_SPCKD);
	rspi_write8(rspi, 0x00, RSPI_SSLND);
	rspi_write8(rspi, 0x00, RSPI_SPND);

	/* Resets sequencer and programs the data length into SPCMD0 */
	rspi_write8(rspi, 0, RSPI_SPSCR);
	rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
	rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);

	/* Sets RSPI mode */
	rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);

	return 0;
}
332 
/*
 * functions for QSPI
 */

/*
 * Configure QSPI for transfers of @access_size bits (8, 16, or 32):
 * pin control, bit rate (note: no "- 1" here, unlike the RSPI
 * variants), byte access, zero delays (but with the delay-enable bits
 * set in SPCMD), FIFO reset, sequencer reset, and master mode.
 * Always returns 0.
 */
static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
{
	int spbr;

	/* Sets output mode, MOSI signal, and (optionally) loopback */
	rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);

	/* Sets transfer bit rate, clamped to the 8-bit SPBR range */
	spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->max_speed_hz);
	rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);

	/* Disable dummy transmission, set byte access */
	rspi_write8(rspi, 0, RSPI_SPDCR);
	rspi->byte_access = 1;

	/* Sets RSPCK, SSL, next-access delay value */
	rspi_write8(rspi, 0x00, RSPI_SPCKD);
	rspi_write8(rspi, 0x00, RSPI_SSLND);
	rspi_write8(rspi, 0x00, RSPI_SPND);

	/* Data Length Setting */
	if (access_size == 8)
		rspi->spcmd |= SPCMD_SPB_8BIT;
	else if (access_size == 16)
		rspi->spcmd |= SPCMD_SPB_16BIT;
	else
		rspi->spcmd |= SPCMD_SPB_32BIT;

	/* Enable the (zeroed) clock/SSL/next-access delay settings */
	rspi->spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SPNDEN;

	/* Resets transfer data length */
	rspi_write32(rspi, 0, QSPI_SPBMUL0);

	/* Resets transmit and receive buffer */
	rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
	/* Sets buffer to allow normal operation */
	rspi_write8(rspi, 0x00, QSPI_SPBFCR);

	/* Resets sequencer */
	rspi_write8(rspi, 0, RSPI_SPSCR);
	rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);

	/* Sets RSPI mode */
	rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);

	return 0;
}
383 
384 static void qspi_update(const struct rspi_data *rspi, u8 mask, u8 val, u8 reg)
385 {
386 	u8 data;
387 
388 	data = rspi_read8(rspi, reg);
389 	data &= ~mask;
390 	data |= (val & mask);
391 	rspi_write8(rspi, data, reg);
392 }
393 
394 static unsigned int qspi_set_send_trigger(struct rspi_data *rspi,
395 					  unsigned int len)
396 {
397 	unsigned int n;
398 
399 	n = min(len, QSPI_BUFFER_SIZE);
400 
401 	if (len >= QSPI_BUFFER_SIZE) {
402 		/* sets triggering number to 32 bytes */
403 		qspi_update(rspi, SPBFCR_TXTRG_MASK,
404 			     SPBFCR_TXTRG_32B, QSPI_SPBFCR);
405 	} else {
406 		/* sets triggering number to 1 byte */
407 		qspi_update(rspi, SPBFCR_TXTRG_MASK,
408 			     SPBFCR_TXTRG_1B, QSPI_SPBFCR);
409 	}
410 
411 	return n;
412 }
413 
414 static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
415 {
416 	unsigned int n;
417 
418 	n = min(len, QSPI_BUFFER_SIZE);
419 
420 	if (len >= QSPI_BUFFER_SIZE) {
421 		/* sets triggering number to 32 bytes */
422 		qspi_update(rspi, SPBFCR_RXTRG_MASK,
423 			     SPBFCR_RXTRG_32B, QSPI_SPBFCR);
424 	} else {
425 		/* sets triggering number to 1 byte */
426 		qspi_update(rspi, SPBFCR_RXTRG_MASK,
427 			     SPBFCR_RXTRG_1B, QSPI_SPBFCR);
428 	}
429 	return n;
430 }
431 
432 static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
433 {
434 	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
435 }
436 
437 static void rspi_disable_irq(const struct rspi_data *rspi, u8 disable)
438 {
439 	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
440 }
441 
/*
 * Wait until one of the status bits in @wait_mask is set.  If the
 * condition does not already hold, the interrupt(s) in @enable_bit are
 * enabled and we sleep for up to 1 s; the IRQ handler snapshots the
 * status into rspi->spsr, disables the interrupt again, and wakes us.
 * Returns 0 on success or -ETIMEDOUT.
 */
static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
				   u8 enable_bit)
{
	int ret;

	/* Fast path: the condition may already be satisfied */
	rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
	if (rspi->spsr & wait_mask)
		return 0;

	rspi_enable_irq(rspi, enable_bit);
	ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
	/* Re-check the condition: it may have become true at timeout */
	if (ret == 0 && !(rspi->spsr & wait_mask))
		return -ETIMEDOUT;

	return 0;
}
458 
/* Wait for the transmit buffer to become empty (0 or -ETIMEDOUT). */
static inline int rspi_wait_for_tx_empty(struct rspi_data *rspi)
{
	return rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
}
463 
/* Wait for the receive buffer to become full (0 or -ETIMEDOUT). */
static inline int rspi_wait_for_rx_full(struct rspi_data *rspi)
{
	return rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE);
}
468 
469 static int rspi_data_out(struct rspi_data *rspi, u8 data)
470 {
471 	int error = rspi_wait_for_tx_empty(rspi);
472 	if (error < 0) {
473 		dev_err(&rspi->ctlr->dev, "transmit timeout\n");
474 		return error;
475 	}
476 	rspi_write_data(rspi, data);
477 	return 0;
478 }
479 
480 static int rspi_data_in(struct rspi_data *rspi)
481 {
482 	int error;
483 	u8 data;
484 
485 	error = rspi_wait_for_rx_full(rspi);
486 	if (error < 0) {
487 		dev_err(&rspi->ctlr->dev, "receive timeout\n");
488 		return error;
489 	}
490 	data = rspi_read_data(rspi);
491 	return data;
492 }
493 
494 static int rspi_pio_transfer(struct rspi_data *rspi, const u8 *tx, u8 *rx,
495 			     unsigned int n)
496 {
497 	while (n-- > 0) {
498 		if (tx) {
499 			int ret = rspi_data_out(rspi, *tx++);
500 			if (ret < 0)
501 				return ret;
502 		}
503 		if (rx) {
504 			int ret = rspi_data_in(rspi);
505 			if (ret < 0)
506 				return ret;
507 			*rx++ = ret;
508 		}
509 	}
510 
511 	return 0;
512 }
513 
/* DMA completion callback: flag completion and wake rspi_dma_transfer(). */
static void rspi_dma_complete(void *arg)
{
	struct rspi_data *rspi = arg;

	rspi->dma_callbacked = 1;
	wake_up_interruptible(&rspi->wait);
}
521 
/*
 * Perform a transfer using the controller's DMA channels.  @tx and/or
 * @rx may be NULL for unidirectional transfers.  Returns 0 on success
 * or a negative error code; -EAGAIN means DMA could not be set up and
 * the caller should fall back to PIO.
 */
static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
			     struct sg_table *rx)
{
	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
	u8 irq_mask = 0;
	unsigned int other_irq = 0;
	dma_cookie_t cookie;
	int ret;

	/* First prepare and submit the DMA request(s), as this may fail */
	if (rx) {
		desc_rx = dmaengine_prep_slave_sg(rspi->ctlr->dma_rx, rx->sgl,
					rx->nents, DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_rx) {
			ret = -EAGAIN;
			goto no_dma_rx;
		}

		desc_rx->callback = rspi_dma_complete;
		desc_rx->callback_param = rspi;
		cookie = dmaengine_submit(desc_rx);
		if (dma_submit_error(cookie)) {
			ret = cookie;
			goto no_dma_rx;
		}

		irq_mask |= SPCR_SPRIE;
	}

	if (tx) {
		desc_tx = dmaengine_prep_slave_sg(rspi->ctlr->dma_tx, tx->sgl,
					tx->nents, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_tx) {
			ret = -EAGAIN;
			goto no_dma_tx;
		}

		if (rx) {
			/* No callback: the RX descriptor signals completion */
			desc_tx->callback = NULL;
		} else {
			desc_tx->callback = rspi_dma_complete;
			desc_tx->callback_param = rspi;
		}
		cookie = dmaengine_submit(desc_tx);
		if (dma_submit_error(cookie)) {
			ret = cookie;
			goto no_dma_tx;
		}

		irq_mask |= SPCR_SPTIE;
	}

	/*
	 * DMAC needs SPxIE, but if SPxIE is set, the IRQ routine will be
	 * called. So, this driver disables the IRQ while DMA transfer.
	 */
	if (tx)
		disable_irq(other_irq = rspi->tx_irq);
	if (rx && rspi->rx_irq != other_irq)
		disable_irq(rspi->rx_irq);

	rspi_enable_irq(rspi, irq_mask);
	rspi->dma_callbacked = 0;

	/* Now start DMA */
	if (rx)
		dma_async_issue_pending(rspi->ctlr->dma_rx);
	if (tx)
		dma_async_issue_pending(rspi->ctlr->dma_tx);

	ret = wait_event_interruptible_timeout(rspi->wait,
					       rspi->dma_callbacked, HZ);
	if (ret > 0 && rspi->dma_callbacked) {
		ret = 0;
	} else {
		if (!ret) {
			dev_err(&rspi->ctlr->dev, "DMA timeout\n");
			ret = -ETIMEDOUT;
		}
		/* Timed out or interrupted: cancel in-flight descriptors */
		if (tx)
			dmaengine_terminate_all(rspi->ctlr->dma_tx);
		if (rx)
			dmaengine_terminate_all(rspi->ctlr->dma_rx);
	}

	rspi_disable_irq(rspi, irq_mask);

	if (tx)
		enable_irq(rspi->tx_irq);
	if (rx && rspi->rx_irq != other_irq)
		enable_irq(rspi->rx_irq);

	return ret;

no_dma_tx:
	if (rx)
		dmaengine_terminate_all(rspi->ctlr->dma_rx);
no_dma_rx:
	if (ret == -EAGAIN) {
		dev_warn_once(&rspi->ctlr->dev,
			      "DMA not available, falling back to PIO\n");
	}
	return ret;
}
629 
/*
 * Prepare for reception: discard any stale word left in the receive
 * buffer and clear a pending overrun error flag.
 */
static void rspi_receive_init(const struct rspi_data *rspi)
{
	u8 spsr;

	spsr = rspi_read8(rspi, RSPI_SPSR);
	if (spsr & SPSR_SPRF)
		rspi_read_data(rspi);	/* dummy read */
	if (spsr & SPSR_OVRF)
		rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
			    RSPI_SPSR);
}
641 
/* As rspi_receive_init(), plus a reset of the TX/RX FIFOs (RZ only). */
static void rspi_rz_receive_init(const struct rspi_data *rspi)
{
	rspi_receive_init(rspi);
	rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, RSPI_SPBFCR);
	rspi_write8(rspi, 0, RSPI_SPBFCR);
}
648 
/*
 * Prepare QSPI for reception: discard any stale word in the receive
 * buffer, then reset and re-enable the TX/RX FIFOs.
 */
static void qspi_receive_init(const struct rspi_data *rspi)
{
	u8 spsr;

	spsr = rspi_read8(rspi, RSPI_SPSR);
	if (spsr & SPSR_SPRF)
		rspi_read_data(rspi);   /* dummy read */
	rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
	rspi_write8(rspi, 0, QSPI_SPBFCR);
}
659 
/* DMA is only worthwhile for transfers that do not fit in the FIFO. */
static bool __rspi_can_dma(const struct rspi_data *rspi,
			   const struct spi_transfer *xfer)
{
	return xfer->len > rspi->ops->fifo_size;
}
665 
666 static bool rspi_can_dma(struct spi_controller *ctlr, struct spi_device *spi,
667 			 struct spi_transfer *xfer)
668 {
669 	struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
670 
671 	return __rspi_can_dma(rspi, xfer);
672 }
673 
674 static int rspi_dma_check_then_transfer(struct rspi_data *rspi,
675 					 struct spi_transfer *xfer)
676 {
677 	if (!rspi->ctlr->can_dma || !__rspi_can_dma(rspi, xfer))
678 		return -EAGAIN;
679 
680 	/* rx_buf can be NULL on RSPI on SH in TX-only Mode */
681 	return rspi_dma_transfer(rspi, &xfer->tx_sg,
682 				xfer->rx_buf ? &xfer->rx_sg : NULL);
683 }
684 
/*
 * Transfer path shared by the SH and RZ variants: try DMA first, fall
 * back to PIO on -EAGAIN, then wait for the last word to leave the
 * transmit buffer.  Returns 0 or a negative error code.
 */
static int rspi_common_transfer(struct rspi_data *rspi,
				struct spi_transfer *xfer)
{
	int ret;

	ret = rspi_dma_check_then_transfer(rspi, xfer);
	if (ret != -EAGAIN)
		return ret;

	ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
	if (ret < 0)
		return ret;

	/* Wait for the last transmission */
	rspi_wait_for_tx_empty(rspi);

	return 0;
}
703 
/*
 * transfer_one for RSPI on SH: select full-duplex or TX-only mode
 * depending on whether a receive buffer is supplied, then run the
 * common DMA/PIO transfer path.
 */
static int rspi_transfer_one(struct spi_controller *ctlr,
			     struct spi_device *spi, struct spi_transfer *xfer)
{
	struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
	u8 spcr;

	spcr = rspi_read8(rspi, RSPI_SPCR);
	if (xfer->rx_buf) {
		/* Full duplex: flush stale RX data and clear overrun first */
		rspi_receive_init(rspi);
		spcr &= ~SPCR_TXMD;
	} else {
		spcr |= SPCR_TXMD;	/* TX-only mode */
	}
	rspi_write8(rspi, spcr, RSPI_SPCR);

	return rspi_common_transfer(rspi, xfer);
}
721 
/* transfer_one for RSPI on RZ: reset FIFOs, then run the common path. */
static int rspi_rz_transfer_one(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct rspi_data *rspi = spi_controller_get_devdata(ctlr);

	rspi_rz_receive_init(rspi);

	return rspi_common_transfer(rspi, xfer);
}
732 
/*
 * Full-duplex PIO transfer for QSPI: push up to one FIFO's worth (32
 * bytes) through the transmit buffer, then drain the same amount from
 * the receive buffer, until @len bytes are done.  Returns 0 or a
 * negative error code on timeout.
 */
static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
					u8 *rx, unsigned int len)
{
	unsigned int i, n;
	int ret;

	while (len > 0) {
		/* n = chunk size for this round (at most QSPI_BUFFER_SIZE) */
		n = qspi_set_send_trigger(rspi, len);
		qspi_set_receive_trigger(rspi, len);
		ret = rspi_wait_for_tx_empty(rspi);
		if (ret < 0) {
			dev_err(&rspi->ctlr->dev, "transmit timeout\n");
			return ret;
		}
		for (i = 0; i < n; i++)
			rspi_write_data(rspi, *tx++);

		ret = rspi_wait_for_rx_full(rspi);
		if (ret < 0) {
			dev_err(&rspi->ctlr->dev, "receive timeout\n");
			return ret;
		}
		for (i = 0; i < n; i++)
			*rx++ = rspi_read_data(rspi);

		len -= n;
	}

	return 0;
}
763 
/*
 * Full-duplex QSPI transfer: try DMA, falling back to chunked PIO on
 * -EAGAIN.  Returns 0 or a negative error code.
 */
static int qspi_transfer_out_in(struct rspi_data *rspi,
				struct spi_transfer *xfer)
{
	int ret;

	qspi_receive_init(rspi);

	ret = rspi_dma_check_then_transfer(rspi, xfer);
	if (ret != -EAGAIN)
		return ret;

	return qspi_trigger_transfer_out_in(rspi, xfer->tx_buf,
					    xfer->rx_buf, xfer->len);
}
778 
/*
 * TX-only QSPI transfer (Dual/Quad SPI write): try DMA, falling back
 * to chunked PIO on -EAGAIN, then wait for the last word to go out.
 * Returns 0 or a negative error code.
 */
static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
{
	const u8 *tx = xfer->tx_buf;
	unsigned int n = xfer->len;
	unsigned int i, len;
	int ret;

	if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
		ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
		if (ret != -EAGAIN)
			return ret;
	}

	while (n > 0) {
		/* len = chunk size for this round (at most QSPI_BUFFER_SIZE) */
		len = qspi_set_send_trigger(rspi, n);
		ret = rspi_wait_for_tx_empty(rspi);
		if (ret < 0) {
			dev_err(&rspi->ctlr->dev, "transmit timeout\n");
			return ret;
		}
		for (i = 0; i < len; i++)
			rspi_write_data(rspi, *tx++);

		n -= len;
	}

	/* Wait for the last transmission */
	rspi_wait_for_tx_empty(rspi);

	return 0;
}
810 
811 static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
812 {
813 	u8 *rx = xfer->rx_buf;
814 	unsigned int n = xfer->len;
815 	unsigned int i, len;
816 	int ret;
817 
818 	if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
819 		int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
820 		if (ret != -EAGAIN)
821 			return ret;
822 	}
823 
824 	while (n > 0) {
825 		len = qspi_set_receive_trigger(rspi, n);
826 		ret = rspi_wait_for_rx_full(rspi);
827 		if (ret < 0) {
828 			dev_err(&rspi->ctlr->dev, "receive timeout\n");
829 			return ret;
830 		}
831 		for (i = 0; i < len; i++)
832 			*rx++ = rspi_read_data(rspi);
833 
834 		n -= len;
835 	}
836 
837 	return 0;
838 }
839 
840 static int qspi_transfer_one(struct spi_controller *ctlr,
841 			     struct spi_device *spi, struct spi_transfer *xfer)
842 {
843 	struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
844 
845 	if (spi->mode & SPI_LOOP) {
846 		return qspi_transfer_out_in(rspi, xfer);
847 	} else if (xfer->tx_nbits > SPI_NBITS_SINGLE) {
848 		/* Quad or Dual SPI Write */
849 		return qspi_transfer_out(rspi, xfer);
850 	} else if (xfer->rx_nbits > SPI_NBITS_SINGLE) {
851 		/* Quad or Dual SPI Read */
852 		return qspi_transfer_in(rspi, xfer);
853 	} else {
854 		/* Single SPI Transfer */
855 		return qspi_transfer_out_in(rspi, xfer);
856 	}
857 }
858 
859 static u16 qspi_transfer_mode(const struct spi_transfer *xfer)
860 {
861 	if (xfer->tx_buf)
862 		switch (xfer->tx_nbits) {
863 		case SPI_NBITS_QUAD:
864 			return SPCMD_SPIMOD_QUAD;
865 		case SPI_NBITS_DUAL:
866 			return SPCMD_SPIMOD_DUAL;
867 		default:
868 			return 0;
869 		}
870 	if (xfer->rx_buf)
871 		switch (xfer->rx_nbits) {
872 		case SPI_NBITS_QUAD:
873 			return SPCMD_SPIMOD_QUAD | SPCMD_SPRW;
874 		case SPI_NBITS_DUAL:
875 			return SPCMD_SPIMOD_DUAL | SPCMD_SPRW;
876 		default:
877 			return 0;
878 		}
879 
880 	return 0;
881 }
882 
/*
 * Program the QSPI sequencer for a message that mixes transfer modes
 * (single/dual/quad).  Consecutive transfers with the same mode share
 * one SPCMD slot and their lengths are accumulated into the matching
 * SPBMUL register.  Returns 0, or -EINVAL if the message needs more
 * than QSPI_NUM_SPCMD different modes.
 */
static int qspi_setup_sequencer(struct rspi_data *rspi,
				const struct spi_message *msg)
{
	const struct spi_transfer *xfer;
	unsigned int i = 0, len = 0;
	u16 current_mode = 0xffff, mode;	/* 0xffff = no mode yet */

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		mode = qspi_transfer_mode(xfer);
		if (mode == current_mode) {
			/* Same mode as previous transfer: extend the slot */
			len += xfer->len;
			continue;
		}

		/* Transfer mode change */
		if (i) {
			/* Set transfer data length of previous transfer */
			rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
		}

		if (i >= QSPI_NUM_SPCMD) {
			dev_err(&msg->spi->dev,
				"Too many different transfer modes");
			return -EINVAL;
		}

		/* Program transfer mode for this transfer */
		rspi_write16(rspi, rspi->spcmd | mode, RSPI_SPCMD(i));
		current_mode = mode;
		len = xfer->len;
		i++;
	}
	if (i) {
		/* Set final transfer data length and sequence length */
		rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
		rspi_write8(rspi, i - 1, RSPI_SPSCR);
	}

	return 0;
}
923 
/*
 * Per-device setup: program the native chip-select polarity in
 * RSPI_SSLP.  Nothing to do when a GPIO chip select is used.  The
 * read-modify-write is protected by rspi->lock, and the controller is
 * runtime-resumed around the register access.  Always returns 0.
 */
static int rspi_setup(struct spi_device *spi)
{
	struct rspi_data *rspi = spi_controller_get_devdata(spi->controller);
	u8 sslp;

	if (spi->cs_gpiod)
		return 0;

	pm_runtime_get_sync(&rspi->pdev->dev);
	spin_lock_irq(&rspi->lock);

	sslp = rspi_read8(rspi, RSPI_SSLP);
	if (spi->mode & SPI_CS_HIGH)
		sslp |= SSLP_SSLP(spi->chip_select);
	else
		sslp &= ~SSLP_SSLP(spi->chip_select);
	rspi_write8(rspi, sslp, RSPI_SSLP);

	spin_unlock_irq(&rspi->lock);
	pm_runtime_put(&rspi->pdev->dev);
	return 0;
}
946 
/*
 * Prepare the controller for @msg: build the SPCMD template from the
 * device's SPI mode, pick the chip select, apply the variant-specific
 * configuration, optionally set up the QSPI sequencer for mixed-mode
 * messages, and finally enable the SPI function.  Returns 0 or a
 * negative error code from the sequencer setup.
 */
static int rspi_prepare_message(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = msg->spi;
	int ret;

	rspi->max_speed_hz = spi->max_speed_hz;

	/* Keep SSL asserted between words; add clock mode and bit order */
	rspi->spcmd = SPCMD_SSLKP;
	if (spi->mode & SPI_CPOL)
		rspi->spcmd |= SPCMD_CPOL;
	if (spi->mode & SPI_CPHA)
		rspi->spcmd |= SPCMD_CPHA;
	if (spi->mode & SPI_LSB_FIRST)
		rspi->spcmd |= SPCMD_LSBF;

	/* Configure slave signal to assert */
	rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs
						: spi->chip_select);

	/* CMOS output mode and MOSI signal from previous transfer */
	rspi->sppcr = 0;
	if (spi->mode & SPI_LOOP)
		rspi->sppcr |= SPPCR_SPLP;

	rspi->ops->set_config_register(rspi, 8);

	if (msg->spi->mode &
	    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
		/* Setup sequencer for messages with multiple transfer modes */
		ret = qspi_setup_sequencer(rspi, msg);
		if (ret < 0)
			return ret;
	}

	/* Enable SPI function in master mode */
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
	return 0;
}
987 
/*
 * Undo rspi_prepare_message(): disable the SPI function and restore
 * SPCMD0/the sequencer to the single-transfer state.  Always returns 0.
 */
static int rspi_unprepare_message(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	struct rspi_data *rspi = spi_controller_get_devdata(ctlr);

	/* Disable SPI function */
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);

	/* Reset sequencer for Single SPI Transfers */
	rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
	rspi_write8(rspi, 0, RSPI_SPSCR);
	return 0;
}
1001 
1002 static irqreturn_t rspi_irq_mux(int irq, void *_sr)
1003 {
1004 	struct rspi_data *rspi = _sr;
1005 	u8 spsr;
1006 	irqreturn_t ret = IRQ_NONE;
1007 	u8 disable_irq = 0;
1008 
1009 	rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
1010 	if (spsr & SPSR_SPRF)
1011 		disable_irq |= SPCR_SPRIE;
1012 	if (spsr & SPSR_SPTEF)
1013 		disable_irq |= SPCR_SPTIE;
1014 
1015 	if (disable_irq) {
1016 		ret = IRQ_HANDLED;
1017 		rspi_disable_irq(rspi, disable_irq);
1018 		wake_up(&rspi->wait);
1019 	}
1020 
1021 	return ret;
1022 }
1023 
1024 static irqreturn_t rspi_irq_rx(int irq, void *_sr)
1025 {
1026 	struct rspi_data *rspi = _sr;
1027 	u8 spsr;
1028 
1029 	rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
1030 	if (spsr & SPSR_SPRF) {
1031 		rspi_disable_irq(rspi, SPCR_SPRIE);
1032 		wake_up(&rspi->wait);
1033 		return IRQ_HANDLED;
1034 	}
1035 
1036 	return 0;
1037 }
1038 
1039 static irqreturn_t rspi_irq_tx(int irq, void *_sr)
1040 {
1041 	struct rspi_data *rspi = _sr;
1042 	u8 spsr;
1043 
1044 	rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
1045 	if (spsr & SPSR_SPTEF) {
1046 		rspi_disable_irq(rspi, SPCR_SPTIE);
1047 		wake_up(&rspi->wait);
1048 		return IRQ_HANDLED;
1049 	}
1050 
1051 	return 0;
1052 }
1053 
/*
 * Request and configure a DMA channel for direction @dir, either via DT
 * ("tx"/"rx" channel names) or the legacy shdma filter with slave @id.
 * The channel is configured for 1-byte accesses to the data register at
 * @port_addr.  Returns the channel, or NULL on failure.
 */
static struct dma_chan *rspi_request_dma_chan(struct device *dev,
					      enum dma_transfer_direction dir,
					      unsigned int id,
					      dma_addr_t port_addr)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_slave_config cfg;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
				(void *)(unsigned long)id, dev,
				dir == DMA_MEM_TO_DEV ? "tx" : "rx");
	if (!chan) {
		dev_warn(dev, "dma_request_slave_channel_compat failed\n");
		return NULL;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = dir;
	if (dir == DMA_MEM_TO_DEV) {
		cfg.dst_addr = port_addr;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	} else {
		cfg.src_addr = port_addr;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret) {
		dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}
1094 
1095 static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr,
1096 			    const struct resource *res)
1097 {
1098 	const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
1099 	unsigned int dma_tx_id, dma_rx_id;
1100 
1101 	if (dev->of_node) {
1102 		/* In the OF case we will get the slave IDs from the DT */
1103 		dma_tx_id = 0;
1104 		dma_rx_id = 0;
1105 	} else if (rspi_pd && rspi_pd->dma_tx_id && rspi_pd->dma_rx_id) {
1106 		dma_tx_id = rspi_pd->dma_tx_id;
1107 		dma_rx_id = rspi_pd->dma_rx_id;
1108 	} else {
1109 		/* The driver assumes no error. */
1110 		return 0;
1111 	}
1112 
1113 	ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
1114 					     res->start + RSPI_SPDR);
1115 	if (!ctlr->dma_tx)
1116 		return -ENODEV;
1117 
1118 	ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
1119 					     res->start + RSPI_SPDR);
1120 	if (!ctlr->dma_rx) {
1121 		dma_release_channel(ctlr->dma_tx);
1122 		ctlr->dma_tx = NULL;
1123 		return -ENODEV;
1124 	}
1125 
1126 	ctlr->can_dma = rspi_can_dma;
1127 	dev_info(dev, "DMA available");
1128 	return 0;
1129 }
1130 
/* Release the DMA channels obtained by rspi_request_dma(), if any. */
static void rspi_release_dma(struct spi_controller *ctlr)
{
	/* dma_release_channel() is not NULL-safe, hence the checks */
	if (ctlr->dma_tx)
		dma_release_channel(ctlr->dma_tx);
	if (ctlr->dma_rx)
		dma_release_channel(ctlr->dma_rx);
}
1138 
/*
 * Platform driver .remove callback.  Most resources (MMIO mapping, clock,
 * IRQs, the registered controller) are devres-managed, so only the DMA
 * channels and runtime PM need explicit teardown here.
 */
static int rspi_remove(struct platform_device *pdev)
{
	struct rspi_data *rspi = platform_get_drvdata(pdev);

	rspi_release_dma(rspi->ctlr);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
1148 
/* Variant ops: RSPI on legacy SH */
static const struct spi_ops rspi_ops = {
	.set_config_register =	rspi_set_config_register,
	.transfer_one =		rspi_transfer_one,
	.flags =		SPI_CONTROLLER_MUST_TX,
	.fifo_size =		8,
	.num_hw_ss =		2,
};

/* Variant ops: RSPI on RZ/A1H */
static const struct spi_ops rspi_rz_ops = {
	.set_config_register =	rspi_rz_set_config_register,
	.transfer_one =		rspi_rz_transfer_one,
	.flags =		SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
	.fifo_size =		8,	/* 8 for TX, 32 for RX */
	.num_hw_ss =		1,
};

/* Variant ops: QSPI on R-Car Gen2, with Dual/Quad transfer support */
static const struct spi_ops qspi_ops = {
	.set_config_register =	qspi_set_config_register,
	.transfer_one =		qspi_transfer_one,
	.extra_mode_bits =	SPI_TX_DUAL | SPI_TX_QUAD |
				SPI_RX_DUAL | SPI_RX_QUAD,
	.flags =		SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
	.fifo_size =		32,
	.num_hw_ss =		1,
};
1174 
#ifdef CONFIG_OF
/* DT match table; .data selects the variant-specific spi_ops */
static const struct of_device_id rspi_of_match[] = {
	/* RSPI on legacy SH */
	{ .compatible = "renesas,rspi", .data = &rspi_ops },
	/* RSPI on RZ/A1H */
	{ .compatible = "renesas,rspi-rz", .data = &rspi_rz_ops },
	/* QSPI on R-Car Gen2 */
	{ .compatible = "renesas,qspi", .data = &qspi_ops },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, rspi_of_match);
1187 
1188 static int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
1189 {
1190 	u32 num_cs;
1191 	int error;
1192 
1193 	/* Parse DT properties */
1194 	error = of_property_read_u32(dev->of_node, "num-cs", &num_cs);
1195 	if (error) {
1196 		dev_err(dev, "of_property_read_u32 num-cs failed %d\n", error);
1197 		return error;
1198 	}
1199 
1200 	ctlr->num_chipselect = num_cs;
1201 	return 0;
1202 }
#else
#define rspi_of_match	NULL
/* Stub for !CONFIG_OF builds: DT parsing always fails */
static inline int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
{
	return -EINVAL;
}
#endif /* CONFIG_OF */
1210 
1211 static int rspi_request_irq(struct device *dev, unsigned int irq,
1212 			    irq_handler_t handler, const char *suffix,
1213 			    void *dev_id)
1214 {
1215 	const char *name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s",
1216 					  dev_name(dev), suffix);
1217 	if (!name)
1218 		return -ENOMEM;
1219 
1220 	return devm_request_irq(dev, irq, handler, 0, name, dev_id);
1221 }
1222 
/*
 * Platform driver .probe callback.  Allocates the SPI controller,
 * determines the hardware variant (from OF match data or the platform
 * device ID table), maps the registers, obtains clock/IRQs/DMA, and
 * registers the controller.  Cleanup on failure runs through the
 * error1..error3 goto ladder; most resources are devres-managed.
 */
static int rspi_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct spi_controller *ctlr;
	struct rspi_data *rspi;
	int ret;
	const struct rspi_plat_data *rspi_pd;
	const struct spi_ops *ops;

	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
	if (ctlr == NULL)
		return -ENOMEM;

	/*
	 * DT probing supplies the variant ops as OF match data; otherwise
	 * fall back to the platform device ID table and optional platform
	 * data (rspi_pd is only used in the non-OF branch).
	 */
	ops = of_device_get_match_data(&pdev->dev);
	if (ops) {
		ret = rspi_parse_dt(&pdev->dev, ctlr);
		if (ret)
			goto error1;
	} else {
		ops = (struct spi_ops *)pdev->id_entry->driver_data;
		rspi_pd = dev_get_platdata(&pdev->dev);
		if (rspi_pd && rspi_pd->num_chipselect)
			ctlr->num_chipselect = rspi_pd->num_chipselect;
		else
			ctlr->num_chipselect = 2; /* default */
	}

	/* ops parameter check */
	if (!ops->set_config_register) {
		dev_err(&pdev->dev, "there is no set_config_register\n");
		ret = -ENODEV;
		goto error1;
	}

	rspi = spi_controller_get_devdata(ctlr);
	platform_set_drvdata(pdev, rspi);
	rspi->ops = ops;
	rspi->ctlr = ctlr;

	/* res is reused below for the DMA port address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	rspi->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(rspi->addr)) {
		ret = PTR_ERR(rspi->addr);
		goto error1;
	}

	rspi->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(rspi->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(rspi->clk);
		goto error1;
	}

	rspi->pdev = pdev;
	pm_runtime_enable(&pdev->dev);

	init_waitqueue_head(&rspi->wait);
	spin_lock_init(&rspi->lock);

	ctlr->bus_num = pdev->id;
	ctlr->setup = rspi_setup;
	ctlr->auto_runtime_pm = true;
	ctlr->transfer_one = ops->transfer_one;
	ctlr->prepare_message = rspi_prepare_message;
	ctlr->unprepare_message = rspi_unprepare_message;
	ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST |
			  SPI_LOOP | ops->extra_mode_bits;
	ctlr->flags = ops->flags;
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->use_gpio_descriptors = true;
	ctlr->max_native_cs = rspi->ops->num_hw_ss;

	/*
	 * IRQ lookup: either a dedicated "rx" (plus "tx") pair, or a
	 * single multiplexed line ("mux" by name, or the first IRQ).
	 */
	ret = platform_get_irq_byname_optional(pdev, "rx");
	if (ret < 0) {
		ret = platform_get_irq_byname_optional(pdev, "mux");
		if (ret < 0)
			ret = platform_get_irq(pdev, 0);
		if (ret >= 0)
			rspi->rx_irq = rspi->tx_irq = ret;
	} else {
		rspi->rx_irq = ret;
		ret = platform_get_irq_byname(pdev, "tx");
		if (ret >= 0)
			rspi->tx_irq = ret;
	}

	if (rspi->rx_irq == rspi->tx_irq) {
		/* Single multiplexed interrupt */
		ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_mux,
				       "mux", rspi);
	} else {
		/* Multi-interrupt mode, only SPRI and SPTI are used */
		ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_rx,
				       "rx", rspi);
		if (!ret)
			ret = rspi_request_irq(&pdev->dev, rspi->tx_irq,
					       rspi_irq_tx, "tx", rspi);
	}
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq error\n");
		goto error2;
	}

	/* DMA is optional; fall back to PIO if unavailable */
	ret = rspi_request_dma(&pdev->dev, ctlr, res);
	if (ret < 0)
		dev_warn(&pdev->dev, "DMA not available, using PIO\n");

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret < 0) {
		dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
		goto error3;
	}

	dev_info(&pdev->dev, "probed\n");

	return 0;

error3:
	rspi_release_dma(ctlr);
error2:
	pm_runtime_disable(&pdev->dev);
error1:
	spi_controller_put(ctlr);

	return ret;
}
1349 
1350 static const struct platform_device_id spi_driver_ids[] = {
1351 	{ "rspi",	(kernel_ulong_t)&rspi_ops },
1352 	{},
1353 };
1354 
1355 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
1356 
#ifdef CONFIG_PM_SLEEP
/* System suspend: delegate to the SPI core to quiesce the controller */
static int rspi_suspend(struct device *dev)
{
	struct rspi_data *rspi = dev_get_drvdata(dev);

	return spi_controller_suspend(rspi->ctlr);
}

/* System resume: delegate to the SPI core to restart the controller */
static int rspi_resume(struct device *dev)
{
	struct rspi_data *rspi = dev_get_drvdata(dev);

	return spi_controller_resume(rspi->ctlr);
}

static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
#define DEV_PM_OPS	&rspi_pm_ops
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */
1377 
/* Platform driver glue; of_match_ptr() compiles out the table on !CONFIG_OF */
static struct platform_driver rspi_driver = {
	.probe =	rspi_probe,
	.remove =	rspi_remove,
	.id_table =	spi_driver_ids,
	.driver		= {
		.name = "renesas_spi",
		.pm = DEV_PM_OPS,
		.of_match_table = of_match_ptr(rspi_of_match),
	},
};
module_platform_driver(rspi_driver);
1389 
1390 MODULE_DESCRIPTION("Renesas RSPI bus driver");
1391 MODULE_LICENSE("GPL v2");
1392 MODULE_AUTHOR("Yoshihiro Shimoda");
1393 MODULE_ALIAS("platform:rspi");
1394