// SPDX-License-Identifier: GPL-2.0
// spi-uniphier.c - Socionext UniPhier SPI controller driver
// Copyright 2012      Panasonic Corporation
// Copyright 2016-2018 Socionext Inc.

#include <linux/kernel.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

#include <asm/unaligned.h>

#define SSI_TIMEOUT_MS		2000
#define SSI_POLL_TIMEOUT_US	200
#define SSI_MAX_CLK_DIVIDER	254
#define SSI_MIN_CLK_DIVIDER	4

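/*
 * Driver-private state. tx_bytes/rx_bytes count the bytes still to be
 * written/read for the current transfer, and is_save_param marks whether
 * the cached mode/bits_per_word/speed_hz below are valid, so redundant
 * register setup can be skipped between transfers.
 */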
struct uniphier_spi_priv {
	void __iomem *base;
	dma_addr_t base_dma_addr;
	struct clk *clk;
	struct spi_master *master;
	struct completion xfer_done;

	int error;
	unsigned int tx_bytes;
	unsigned int rx_bytes;
	const u8 *tx_buf;
	u8 *rx_buf;
	atomic_t dma_busy;

	bool is_save_param;
	u8 bits_per_word;
	u16 mode;
	u32 speed_hz;
};

#define SSI_CTL			0x00
#define   SSI_CTL_EN		BIT(0)

#define SSI_CKS			0x04
#define   SSI_CKS_CKRAT_MASK	GENMASK(7, 0)
#define   SSI_CKS_CKPHS		BIT(14)
#define   SSI_CKS_CKINIT	BIT(13)
#define   SSI_CKS_CKDLY		BIT(12)

#define SSI_TXWDS		0x08
#define   SSI_TXWDS_WDLEN_MASK	GENMASK(13, 8)
#define   SSI_TXWDS_TDTF_MASK	GENMASK(7, 6)
#define   SSI_TXWDS_DTLEN_MASK	GENMASK(5, 0)

#define SSI_RXWDS		0x0c
#define   SSI_RXWDS_DTLEN_MASK	GENMASK(5, 0)

#define SSI_FPS			0x10
#define   SSI_FPS_FSPOL		BIT(15)
#define   SSI_FPS_FSTRT		BIT(14)

#define SSI_SR			0x14
#define   SSI_SR_BUSY		BIT(7)
#define   SSI_SR_RNE		BIT(0)

#define SSI_IE			0x18
#define   SSI_IE_TCIE		BIT(4)
#define   SSI_IE_RCIE		BIT(3)
#define   SSI_IE_TXRE		BIT(2)
#define   SSI_IE_RXRE		BIT(1)
#define   SSI_IE_RORIE		BIT(0)
#define   SSI_IE_ALL_MASK	GENMASK(4, 0)

#define SSI_IS			0x1c
#define   SSI_IS_RXRS		BIT(9)
#define   SSI_IS_RCID		BIT(3)
#define   SSI_IS_RORID		BIT(0)

#define SSI_IC			0x1c
#define   SSI_IC_TCIC		BIT(4)
#define   SSI_IC_RCIC		BIT(3)
#define   SSI_IC_RORIC		BIT(0)

#define SSI_FC			0x20
#define   SSI_FC_TXFFL		BIT(12)
#define   SSI_FC_TXFTH_MASK	GENMASK(11, 8)
#define   SSI_FC_RXFFL		BIT(4)
#define   SSI_FC_RXFTH_MASK	GENMASK(3, 0)

#define SSI_TXDR		0x24
#define SSI_RXDR		0x24

#define SSI_FIFO_DEPTH		8U
#define SSI_FIFO_BURST_NUM	1

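/* Flags for priv->dma_busy, tracking which DMA directions are in flight */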
#define SSI_DMA_RX_BUSY		BIT(1)
#define SSI_DMA_TX_BUSY		BIT(0)

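/* Number of bytes needed to carry one word of the given bit width */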
static inline unsigned int bytes_per_word(unsigned int bits)
{
	return bits <= 8 ? 1 : (bits <= 16 ? 2 : 4);
}

static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv,
					   u32 mask)
{
	u32 val;

	val = readl(priv->base + SSI_IE);
	val |= mask;
	writel(val, priv->base + SSI_IE);
}

static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv,
					    u32 mask)
{
	u32 val;

	val = readl(priv->base + SSI_IE);
	val &= ~mask;
	writel(val, priv->base + SSI_IE);
}

static void uniphier_spi_set_mode(struct spi_device *spi)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
	u32 val1, val2;

	/*
	 * clock setting
	 * CKPHS    capture timing. 0:rising edge, 1:falling edge
	 * CKINIT   clock initial level. 0:low, 1:high
	 * CKDLY    clock delay. 0:no delay, 1:delay depending on FSTRT
	 *          (FSTRT=0: 1 clock, FSTRT=1: 0.5 clock)
	 *
	 * frame setting
	 * FSPOL    frame signal polarity. 0: low, 1: high
	 * FSTRT    start frame timing
	 *          0: rising edge of clock, 1: falling edge of clock
	 */
	switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
	case SPI_MODE_0:
		/* CKPHS=1, CKINIT=0, CKDLY=1, FSTRT=0 */
		val1 = SSI_CKS_CKPHS | SSI_CKS_CKDLY;
		val2 = 0;
		break;
	case SPI_MODE_1:
		/* CKPHS=0, CKINIT=0, CKDLY=0, FSTRT=1 */
		val1 = 0;
		val2 = SSI_FPS_FSTRT;
		break;
	case SPI_MODE_2:
		/* CKPHS=0, CKINIT=1, CKDLY=1, FSTRT=1 */
		val1 = SSI_CKS_CKINIT | SSI_CKS_CKDLY;
		val2 = SSI_FPS_FSTRT;
		break;
	case SPI_MODE_3:
		/* CKPHS=1, CKINIT=1, CKDLY=0, FSTRT=0 */
		val1 = SSI_CKS_CKPHS | SSI_CKS_CKINIT;
		val2 = 0;
		break;
	}

	if (!(spi->mode & SPI_CS_HIGH))
		val2 |= SSI_FPS_FSPOL;

	writel(val1, priv->base + SSI_CKS);
	writel(val2, priv->base + SSI_FPS);

	val1 = 0;
	if (spi->mode & SPI_LSB_FIRST)
		val1 |= FIELD_PREP(SSI_TXWDS_TDTF_MASK, 1);
	writel(val1, priv->base + SSI_TXWDS);
	writel(val1, priv->base + SSI_RXWDS);
}

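/* Program the TXWDS/RXWDS word and data length fields to the transfer's bits-per-word */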
static void uniphier_spi_set_transfer_size(struct spi_device *spi, int size)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
	u32 val;

	val = readl(priv->base + SSI_TXWDS);
	val &= ~(SSI_TXWDS_WDLEN_MASK | SSI_TXWDS_DTLEN_MASK);
	val |= FIELD_PREP(SSI_TXWDS_WDLEN_MASK, size);
	val |= FIELD_PREP(SSI_TXWDS_DTLEN_MASK, size);
	writel(val, priv->base + SSI_TXWDS);

	val = readl(priv->base + SSI_RXWDS);
	val &= ~SSI_RXWDS_DTLEN_MASK;
	val |= FIELD_PREP(SSI_RXWDS_DTLEN_MASK, size);
	writel(val, priv->base + SSI_RXWDS);
}

static void uniphier_spi_set_baudrate(struct spi_device *spi,
				      unsigned int speed)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
	u32 val, ckdiv;

	/*
	 * The supported clock divider values are even numbers from 4 to 254
	 * (4, 6, 8, ..., 254). Round the divider up so the resulting speed
	 * is equal to or less than the requested one.
	 */
	ckdiv = DIV_ROUND_UP(clk_get_rate(priv->clk), speed);
	ckdiv = round_up(ckdiv, 2);
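	/*
	 * Example (assumed figures): with a 50 MHz parent clock and a
	 * 10 MHz request, DIV_ROUND_UP() gives 5, rounded up to 6, for an
	 * actual rate of ~8.3 MHz, never above the requested speed.
	 */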

	val = readl(priv->base + SSI_CKS);
	val &= ~SSI_CKS_CKRAT_MASK;
	val |= ckdiv & SSI_CKS_CKRAT_MASK;
	writel(val, priv->base + SSI_CKS);
}

static void uniphier_spi_setup_transfer(struct spi_device *spi,
				       struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
	u32 val;

	priv->error = 0;
	priv->tx_buf = t->tx_buf;
	priv->rx_buf = t->rx_buf;
	priv->tx_bytes = priv->rx_bytes = t->len;

	if (!priv->is_save_param || priv->mode != spi->mode) {
		uniphier_spi_set_mode(spi);
		priv->mode = spi->mode;
		priv->is_save_param = false;
	}

	if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
		uniphier_spi_set_transfer_size(spi, t->bits_per_word);
		priv->bits_per_word = t->bits_per_word;
	}

	if (!priv->is_save_param || priv->speed_hz != t->speed_hz) {
		uniphier_spi_set_baudrate(spi, t->speed_hz);
		priv->speed_hz = t->speed_hz;
	}

	priv->is_save_param = true;

	/* reset FIFOs */
	val = SSI_FC_TXFFL | SSI_FC_RXFFL;
	writel(val, priv->base + SSI_FC);
}

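/* Push one word from tx_buf (or zero when there is no TX buffer) into the TX FIFO */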
static void uniphier_spi_send(struct uniphier_spi_priv *priv)
{
	int wsize;
	u32 val = 0;

	wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
	priv->tx_bytes -= wsize;

	if (priv->tx_buf) {
		switch (wsize) {
		case 1:
			val = *priv->tx_buf;
			break;
		case 2:
			val = get_unaligned_le16(priv->tx_buf);
			break;
		case 4:
			val = get_unaligned_le32(priv->tx_buf);
			break;
		}

		priv->tx_buf += wsize;
	}

	writel(val, priv->base + SSI_TXDR);
}

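/* Pop one word from the RX FIFO and store it if an RX buffer is set */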
static void uniphier_spi_recv(struct uniphier_spi_priv *priv)
{
	int rsize;
	u32 val;

	rsize = min(bytes_per_word(priv->bits_per_word), priv->rx_bytes);
	priv->rx_bytes -= rsize;

	val = readl(priv->base + SSI_RXDR);

	if (priv->rx_buf) {
		switch (rsize) {
		case 1:
			*priv->rx_buf = val;
			break;
		case 2:
			put_unaligned_le16(val, priv->rx_buf);
			break;
		case 4:
			put_unaligned_le32(val, priv->rx_buf);
			break;
		}

		priv->rx_buf += rsize;
	}
}

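/*
 * Set the RX interrupt threshold to 'threshold' words and the TX side to
 * the complementary value (FIFO depth minus threshold).
 */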
static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv,
					    unsigned int threshold)
{
	u32 val;

	val = readl(priv->base + SSI_FC);
	val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
	val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold);
	val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold);
	writel(val, priv->base + SSI_FC);
}

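/*
 * Queue as many TX words as possible without overrunning the RX FIFO:
 * the threshold is the number of words still expected back (capped at
 * the FIFO depth), minus the words already in flight, which is
 * (rx_bytes - tx_bytes) expressed in words.
 */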
static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
{
	unsigned int fifo_threshold, fill_words;
	unsigned int bpw = bytes_per_word(priv->bits_per_word);

	fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw);
	fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);

	uniphier_spi_set_fifo_threshold(priv, fifo_threshold);

	fill_words = fifo_threshold -
		DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw);

	while (fill_words--)
		uniphier_spi_send(priv);
}

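/* Assert or deassert the frame signal (chip select) by rewriting the FSPOL bit */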
static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
	u32 val;

	val = readl(priv->base + SSI_FPS);

	if (enable)
		val |= SSI_FPS_FSPOL;
	else
		val &= ~SSI_FPS_FSPOL;

	writel(val, priv->base + SSI_FPS);
}

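/*
 * DMA is worthwhile only when channels exist for the directions in use
 * and the transfer does not fit in the FIFO.
 */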
static bool uniphier_spi_can_dma(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	unsigned int bpw = bytes_per_word(priv->bits_per_word);

	if ((!master->dma_tx && !master->dma_rx)
	    || (!master->dma_tx && t->tx_buf)
	    || (!master->dma_rx && t->rx_buf))
		return false;

	return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH;
}

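/*
 * DMA completion callbacks: each clears its direction's busy flag and
 * finalizes the transfer once the other direction has also finished.
 */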
static void uniphier_spi_dma_rxcb(void *data)
{
	struct spi_master *master = data;
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);

	uniphier_spi_irq_disable(priv, SSI_IE_RXRE);

	if (!(state & SSI_DMA_TX_BUSY))
		spi_finalize_current_transfer(master);
}

static void uniphier_spi_dma_txcb(void *data)
{
	struct spi_master *master = data;
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);

	uniphier_spi_irq_disable(priv, SSI_IE_TXRE);

	if (!(state & SSI_DMA_RX_BUSY))
		spi_finalize_current_transfer(master);
}

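/*
 * DMA transfer path. Returns a positive value so the SPI core waits for
 * spi_finalize_current_transfer() from the callbacks, 0 when there is
 * nothing to do, or a negative errno on preparation failure.
 */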
static int uniphier_spi_transfer_one_dma(struct spi_master *master,
					 struct spi_device *spi,
					 struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
	int buswidth;

	atomic_set(&priv->dma_busy, 0);

	uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM);

	if (priv->bits_per_word <= 8)
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (priv->bits_per_word <= 16)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;

	if (priv->rx_buf) {
		struct dma_slave_config rxconf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = priv->base_dma_addr + SSI_RXDR,
			.src_addr_width = buswidth,
			.src_maxburst = SSI_FIFO_BURST_NUM,
		};

		dmaengine_slave_config(master->dma_rx, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(
			master->dma_rx,
			t->rx_sg.sgl, t->rx_sg.nents,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!rxdesc)
			goto out_err_prep;

		rxdesc->callback = uniphier_spi_dma_rxcb;
		rxdesc->callback_param = master;

		uniphier_spi_irq_enable(priv, SSI_IE_RXRE);
		atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy);

		dmaengine_submit(rxdesc);
		dma_async_issue_pending(master->dma_rx);
	}

	if (priv->tx_buf) {
		struct dma_slave_config txconf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = priv->base_dma_addr + SSI_TXDR,
			.dst_addr_width = buswidth,
			.dst_maxburst = SSI_FIFO_BURST_NUM,
		};

		dmaengine_slave_config(master->dma_tx, &txconf);

		txdesc = dmaengine_prep_slave_sg(
			master->dma_tx,
			t->tx_sg.sgl, t->tx_sg.nents,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!txdesc)
			goto out_err_prep;

		txdesc->callback = uniphier_spi_dma_txcb;
		txdesc->callback_param = master;

		uniphier_spi_irq_enable(priv, SSI_IE_TXRE);
		atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy);

		dmaengine_submit(txdesc);
		dma_async_issue_pending(master->dma_tx);
	}

	/* signal that we need to wait for completion */
	return (priv->tx_buf || priv->rx_buf);

out_err_prep:
	if (rxdesc)
		dmaengine_terminate_sync(master->dma_rx);

	return -EINVAL;
}

static int uniphier_spi_transfer_one_irq(struct spi_master *master,
					 struct spi_device *spi,
					 struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	struct device *dev = master->dev.parent;
	unsigned long time_left;

	reinit_completion(&priv->xfer_done);

	uniphier_spi_fill_tx_fifo(priv);

	uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE);

	time_left = wait_for_completion_timeout(&priv->xfer_done,
					msecs_to_jiffies(SSI_TIMEOUT_MS));

	uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE);

	if (!time_left) {
		dev_err(dev, "transfer timeout.\n");
		return -ETIMEDOUT;
	}

	return priv->error;
}

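/*
 * Polled transfer path: each iteration of the inner wait burns 100 ns,
 * so the loop budget of SSI_POLL_TIMEOUT_US * 10 bounds the busy-wait to
 * roughly SSI_POLL_TIMEOUT_US; on timeout, fall back to the IRQ path.
 */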
static int uniphier_spi_transfer_one_poll(struct spi_master *master,
					  struct spi_device *spi,
					  struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	int loop = SSI_POLL_TIMEOUT_US * 10;

	while (priv->tx_bytes) {
		uniphier_spi_fill_tx_fifo(priv);

		while ((priv->rx_bytes - priv->tx_bytes) > 0) {
			while (!(readl(priv->base + SSI_SR) & SSI_SR_RNE)
								&& loop--)
				ndelay(100);

			if (loop == -1)
				goto irq_transfer;

			uniphier_spi_recv(priv);
		}
	}

	return 0;

irq_transfer:
	return uniphier_spi_transfer_one_irq(master, spi, t);
}

static int uniphier_spi_transfer_one(struct spi_master *master,
				     struct spi_device *spi,
				     struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	unsigned long threshold;
	bool use_dma;

	/* Terminate and return success for 0 byte length transfer */
	if (!t->len)
		return 0;

	uniphier_spi_setup_transfer(spi, t);

	use_dma = master->can_dma ? master->can_dma(master, spi, t) : false;
	if (use_dma)
		return uniphier_spi_transfer_one_dma(master, spi, t);

	/*
	 * If the transfer operation will take longer than
	 * SSI_POLL_TIMEOUT_US, it should use irq.
	 */
	threshold = DIV_ROUND_UP(SSI_POLL_TIMEOUT_US * priv->speed_hz,
					USEC_PER_SEC * BITS_PER_BYTE);
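	/*
	 * Example (assumed figures): at 10 MHz, 200 us moves 2000 bits,
	 * i.e. a threshold of 250 bytes; longer transfers use the IRQ path.
	 */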
	if (t->len > threshold)
		return uniphier_spi_transfer_one_irq(master, spi, t);
	else
		return uniphier_spi_transfer_one_poll(master, spi, t);
}

static int uniphier_spi_prepare_transfer_hardware(struct spi_master *master)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);

	writel(SSI_CTL_EN, priv->base + SSI_CTL);

	return 0;
}

static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);

	writel(0, priv->base + SSI_CTL);

	return 0;
}

static void uniphier_spi_handle_err(struct spi_master *master,
				    struct spi_message *msg)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	u32 val;

	/* stop running spi transfer */
	writel(0, priv->base + SSI_CTL);

	/* reset FIFOs */
	val = SSI_FC_TXFFL | SSI_FC_RXFFL;
	writel(val, priv->base + SSI_FC);

	uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK);

	if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) {
		dmaengine_terminate_async(master->dma_tx);
		atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
	}

	if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) {
		dmaengine_terminate_async(master->dma_rx);
		atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
	}
}

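/*
 * Interrupt handler for the IRQ-driven path: acknowledge interrupts,
 * report -EIO on RX FIFO overrun, drain received words on RX complete,
 * then either finish the transfer or queue the next FIFO's worth of TX.
 */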
static irqreturn_t uniphier_spi_handler(int irq, void *dev_id)
{
	struct uniphier_spi_priv *priv = dev_id;
	u32 val, stat;

	stat = readl(priv->base + SSI_IS);
	val = SSI_IC_TCIC | SSI_IC_RCIC | SSI_IC_RORIC;
	writel(val, priv->base + SSI_IC);

	/* rx fifo overrun */
	if (stat & SSI_IS_RORID) {
		priv->error = -EIO;
		goto done;
	}

	/* rx complete */
	if ((stat & SSI_IS_RCID) && (stat & SSI_IS_RXRS)) {
		while ((readl(priv->base + SSI_SR) & SSI_SR_RNE) &&
				(priv->rx_bytes - priv->tx_bytes) > 0)
			uniphier_spi_recv(priv);

		if ((readl(priv->base + SSI_SR) & SSI_SR_RNE) ||
				(priv->rx_bytes != priv->tx_bytes)) {
			priv->error = -EIO;
			goto done;
		} else if (priv->rx_bytes == 0)
			goto done;

		/* next tx transfer */
		uniphier_spi_fill_tx_fifo(priv);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;

done:
	complete(&priv->xfer_done);
	return IRQ_HANDLED;
}

static int uniphier_spi_probe(struct platform_device *pdev)
{
	struct uniphier_spi_priv *priv;
	struct spi_master *master;
	struct resource *res;
	struct dma_slave_caps caps;
	u32 dma_tx_burst = 0, dma_rx_burst = 0;
	unsigned long clk_rate;
	int irq;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*priv));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	priv = spi_master_get_devdata(master);
	priv->master = master;
	priv->is_save_param = false;

	priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out_master_put;
	}
	priv->base_dma_addr = res->start;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		ret = PTR_ERR(priv->clk);
		goto out_master_put;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		goto out_master_put;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto out_disable_clk;
	}

	ret = devm_request_irq(&pdev->dev, irq, uniphier_spi_handler,
			       0, "uniphier-spi", priv);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto out_disable_clk;
	}

	init_completion(&priv->xfer_done);

	clk_rate = clk_get_rate(priv->clk);

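	/* The clock divider spans even values 4..254, bounding the bus speed */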
	master->max_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MIN_CLK_DIVIDER);
	master->min_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MAX_CLK_DIVIDER);
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = pdev->id;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);

	master->set_cs = uniphier_spi_set_cs;
	master->transfer_one = uniphier_spi_transfer_one;
	master->prepare_transfer_hardware
				= uniphier_spi_prepare_transfer_hardware;
	master->unprepare_transfer_hardware
				= uniphier_spi_unprepare_transfer_hardware;
	master->handle_err = uniphier_spi_handle_err;
	master->can_dma = uniphier_spi_can_dma;

	master->num_chipselect = 1;
	master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
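
	/* DMA channels are optional; on plain failure fall back to PIO */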
	master->dma_tx = dma_request_chan(&pdev->dev, "tx");
	if (IS_ERR_OR_NULL(master->dma_tx)) {
		if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto out_disable_clk;
		}
		master->dma_tx = NULL;
		dma_tx_burst = INT_MAX;
	} else {
		ret = dma_get_slave_caps(master->dma_tx, &caps);
		if (ret) {
			dev_err(&pdev->dev, "failed to get TX DMA capabilities: %d\n",
				ret);
			goto out_disable_clk;
		}
		dma_tx_burst = caps.max_burst;
	}

	master->dma_rx = dma_request_chan(&pdev->dev, "rx");
	if (IS_ERR_OR_NULL(master->dma_rx)) {
		if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto out_disable_clk;
		}
		master->dma_rx = NULL;
		dma_rx_burst = INT_MAX;
	} else {
		ret = dma_get_slave_caps(master->dma_rx, &caps);
		if (ret) {
			dev_err(&pdev->dev, "failed to get RX DMA capabilities: %d\n",
				ret);
			goto out_disable_clk;
		}
		dma_rx_burst = caps.max_burst;
	}

	master->max_dma_len = min(dma_tx_burst, dma_rx_burst);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		goto out_disable_clk;

	return 0;

out_disable_clk:
	clk_disable_unprepare(priv->clk);

out_master_put:
	spi_master_put(master);
	return ret;
}

static int uniphier_spi_remove(struct platform_device *pdev)
{
	/* drvdata holds the spi_master (set in probe), not the private data */
	struct spi_master *master = platform_get_drvdata(pdev);
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);

	if (master->dma_tx)
		dma_release_channel(master->dma_tx);
	if (master->dma_rx)
		dma_release_channel(master->dma_rx);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static const struct of_device_id uniphier_spi_match[] = {
	{ .compatible = "socionext,uniphier-scssi" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_spi_match);

static struct platform_driver uniphier_spi_driver = {
	.probe = uniphier_spi_probe,
	.remove = uniphier_spi_remove,
	.driver = {
		.name = "uniphier-spi",
		.of_match_table = uniphier_spi_match,
	},
};
module_platform_driver(uniphier_spi_driver);

MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
MODULE_AUTHOR("Keiji Hayashibara <hayashibara.keiji@socionext.com>");
MODULE_DESCRIPTION("Socionext UniPhier SPI controller driver");
MODULE_LICENSE("GPL v2");