xref: /openbmc/linux/drivers/spi/spi-rspi.c (revision 0d456bad)
1 /*
2  * SH RSPI driver
3  *
4  * Copyright (C) 2012  Renesas Solutions Corp.
5  *
6  * Based on spi-sh.c:
7  * Copyright (C) 2011 Renesas Solutions Corp.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; version 2 of the License.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
21  *
22  */
23 
24 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/sched.h>
27 #include <linux/errno.h>
28 #include <linux/list.h>
29 #include <linux/workqueue.h>
30 #include <linux/interrupt.h>
31 #include <linux/platform_device.h>
32 #include <linux/io.h>
33 #include <linux/clk.h>
34 #include <linux/dmaengine.h>
35 #include <linux/dma-mapping.h>
#include <linux/slab.h>		/* kmalloc()/kzalloc()/kfree() for the DMA bounce buffers */
36 #include <linux/sh_dma.h>
37 #include <linux/spi/spi.h>
38 #include <linux/spi/rspi.h>
39 
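/* RSPI register offsets (relative to the memory-mapped base address) */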
40 #define RSPI_SPCR		0x00
41 #define RSPI_SSLP		0x01
42 #define RSPI_SPPCR		0x02
43 #define RSPI_SPSR		0x03
44 #define RSPI_SPDR		0x04
45 #define RSPI_SPSCR		0x08
46 #define RSPI_SPSSR		0x09
47 #define RSPI_SPBR		0x0a
48 #define RSPI_SPDCR		0x0b
49 #define RSPI_SPCKD		0x0c
50 #define RSPI_SSLND		0x0d
51 #define RSPI_SPND		0x0e
52 #define RSPI_SPCR2		0x0f
53 #define RSPI_SPCMD0		0x10
54 #define RSPI_SPCMD1		0x12
55 #define RSPI_SPCMD2		0x14
56 #define RSPI_SPCMD3		0x16
57 #define RSPI_SPCMD4		0x18
58 #define RSPI_SPCMD5		0x1a
59 #define RSPI_SPCMD6		0x1c
60 #define RSPI_SPCMD7		0x1e
61 
62 /* SPCR */
63 #define SPCR_SPRIE		0x80
64 #define SPCR_SPE		0x40
65 #define SPCR_SPTIE		0x20
66 #define SPCR_SPEIE		0x10
67 #define SPCR_MSTR		0x08
68 #define SPCR_MODFEN		0x04
69 #define SPCR_TXMD		0x02
70 #define SPCR_SPMS		0x01
71 
72 /* SSLP */
73 #define SSLP_SSL1P		0x02
74 #define SSLP_SSL0P		0x01
75 
76 /* SPPCR */
77 #define SPPCR_MOIFE		0x20
78 #define SPPCR_MOIFV		0x10
79 #define SPPCR_SPOM		0x04
80 #define SPPCR_SPLP2		0x02
81 #define SPPCR_SPLP		0x01
82 
83 /* SPSR */
84 #define SPSR_SPRF		0x80
85 #define SPSR_SPTEF		0x20
86 #define SPSR_PERF		0x08
87 #define SPSR_MODF		0x04
88 #define SPSR_IDLNF		0x02
89 #define SPSR_OVRF		0x01
90 
91 /* SPSCR */
92 #define SPSCR_SPSLN_MASK	0x07
93 
94 /* SPSSR */
95 #define SPSSR_SPECM_MASK	0x70
96 #define SPSSR_SPCP_MASK		0x07
97 
98 /* SPDCR */
99 #define SPDCR_SPLW		0x20
100 #define SPDCR_SPRDTD		0x10
101 #define SPDCR_SLSEL1		0x08
102 #define SPDCR_SLSEL0		0x04
103 #define SPDCR_SLSEL_MASK	0x0c
104 #define SPDCR_SPFC1		0x02
105 #define SPDCR_SPFC0		0x01
106 
107 /* SPCKD */
108 #define SPCKD_SCKDL_MASK	0x07
109 
110 /* SSLND */
111 #define SSLND_SLNDL_MASK	0x07
112 
113 /* SPND */
114 #define SPND_SPNDL_MASK		0x07
115 
116 /* SPCR2 */
117 #define SPCR2_PTE		0x08
118 #define SPCR2_SPIE		0x04
119 #define SPCR2_SPOE		0x02
120 #define SPCR2_SPPE		0x01
121 
122 /* SPCMDn */
123 #define SPCMD_SCKDEN		0x8000
124 #define SPCMD_SLNDEN		0x4000
125 #define SPCMD_SPNDEN		0x2000
126 #define SPCMD_LSBF		0x1000
127 #define SPCMD_SPB_MASK		0x0f00
128 #define SPCMD_SPB_8_TO_16(bit)	((((bit) - 1) << 8) & SPCMD_SPB_MASK)
129 #define SPCMD_SPB_20BIT		0x0000
130 #define SPCMD_SPB_24BIT		0x0100
131 #define SPCMD_SPB_32BIT		0x0200
132 #define SPCMD_SSLKP		0x0080
133 #define SPCMD_SSLA_MASK		0x0030
134 #define SPCMD_BRDV_MASK		0x000c
135 #define SPCMD_CPOL		0x0002
136 #define SPCMD_CPHA		0x0001
137 
138 struct rspi_data {
139 	void __iomem *addr;
140 	u32 max_speed_hz;
141 	struct spi_master *master;
142 	struct list_head queue;
143 	struct work_struct ws;
144 	wait_queue_head_t wait;
145 	spinlock_t lock;
146 	struct clk *clk;
147 	unsigned char spsr;
148 
149 	/* for dmaengine */
150 	struct dma_chan *chan_tx;
151 	struct dma_chan *chan_rx;
152 	int irq;
153 
154 	unsigned dma_width_16bit:1;
155 	unsigned dma_callbacked:1;
156 };
157 
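/* 8- and 16-bit accessors for the memory-mapped RSPI register block */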
158 static void rspi_write8(struct rspi_data *rspi, u8 data, u16 offset)
159 {
160 	iowrite8(data, rspi->addr + offset);
161 }
162 
163 static void rspi_write16(struct rspi_data *rspi, u16 data, u16 offset)
164 {
165 	iowrite16(data, rspi->addr + offset);
166 }
167 
168 static u8 rspi_read8(struct rspi_data *rspi, u16 offset)
169 {
170 	return ioread8(rspi->addr + offset);
171 }
172 
173 static u16 rspi_read16(struct rspi_data *rspi, u16 offset)
174 {
175 	return ioread16(rspi->addr + offset);
176 }
177 
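/* Bit-rate divider: SPBR = clk / (2 * max_speed_hz) - 1, clamped to the 0..255 register range */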
178 static unsigned char rspi_calc_spbr(struct rspi_data *rspi)
179 {
180 	int tmp;
181 	unsigned char spbr;
182 
183 	tmp = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
184 	spbr = clamp(tmp, 0, 255);
185 
186 	return spbr;
187 }
188 
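/* Set or clear interrupt-enable bits in the SPCR control register */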
189 static void rspi_enable_irq(struct rspi_data *rspi, u8 enable)
190 {
191 	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
192 }
193 
194 static void rspi_disable_irq(struct rspi_data *rspi, u8 disable)
195 {
196 	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
197 }
198 
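/*
 * Enable the requested interrupt and wait (up to one second) for the
 * corresponding SPSR flag, which the IRQ handler caches in rspi->spsr.
 */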
199 static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
200 				   u8 enable_bit)
201 {
202 	int ret;
203 
204 	rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
205 	rspi_enable_irq(rspi, enable_bit);
206 	ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
207 	if (ret == 0 && !(rspi->spsr & wait_mask))
208 		return -ETIMEDOUT;
209 
210 	return 0;
211 }
212 
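/*
 * The driver toggles the SPI enable bit (SPE) to assert and negate the SSL
 * chip select; SSLKP in SPCMD0 keeps SSL asserted between frames of a message.
 */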
213 static void rspi_assert_ssl(struct rspi_data *rspi)
214 {
215 	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
216 }
217 
218 static void rspi_negate_ssl(struct rspi_data *rspi)
219 {
220 	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
221 }
222 
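/*
 * Program the static controller configuration: output pin mode, bit rate,
 * delay registers, frame format (SPCMD0) and master mode.
 */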
223 static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
224 {
225 	/* Sets output mode (CMOS) and MOSI signal (from previous transfer) */
226 	rspi_write8(rspi, 0x00, RSPI_SPPCR);
227 
228 	/* Sets transfer bit rate */
229 	rspi_write8(rspi, rspi_calc_spbr(rspi), RSPI_SPBR);
230 
231 	/* Sets number of frames to be used: 1 frame */
232 	rspi_write8(rspi, 0x00, RSPI_SPDCR);
233 
234 	/* Sets RSPCK, SSL, next-access delay value */
235 	rspi_write8(rspi, 0x00, RSPI_SPCKD);
236 	rspi_write8(rspi, 0x00, RSPI_SSLND);
237 	rspi_write8(rspi, 0x00, RSPI_SPND);
238 
239 	/* Sets parity, interrupt mask */
240 	rspi_write8(rspi, 0x00, RSPI_SPCR2);
241 
242 	/* Sets SPCMD */
243 	rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | SPCMD_SSLKP,
244 		     RSPI_SPCMD0);
245 
246 	/* Sets RSPI mode */
247 	rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
248 
249 	return 0;
250 }
251 
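/* PIO transmit: wait for SPTEF (transmit buffer empty) and write one byte per frame to SPDR */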
252 static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
253 			 struct spi_transfer *t)
254 {
255 	int remain = t->len;
256 	u8 *data;
257 
258 	data = (u8 *)t->tx_buf;
259 	while (remain > 0) {
260 		rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD,
261 			    RSPI_SPCR);
262 
263 		if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
264 			dev_err(&rspi->master->dev,
265 				"%s: tx empty timeout\n", __func__);
266 			return -ETIMEDOUT;
267 		}
268 
269 		rspi_write16(rspi, *data, RSPI_SPDR);
270 		data++;
271 		remain--;
272 	}
273 
274 	/* Wait for the last transmission to finish */
275 	rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
276 
277 	return 0;
278 }
279 
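/* dmaengine completion callback: flag completion and wake up the waiting transfer */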
280 static void rspi_dma_complete(void *arg)
281 {
282 	struct rspi_data *rspi = arg;
283 
284 	rspi->dma_callbacked = 1;
285 	wake_up_interruptible(&rspi->wait);
286 }
287 
288 static int rspi_dma_map_sg(struct scatterlist *sg, void *buf, unsigned len,
289 			   struct dma_chan *chan,
290 			   enum dma_transfer_direction dir)
291 {
292 	sg_init_table(sg, 1);
293 	sg_set_buf(sg, buf, len);
294 	sg_dma_len(sg) = len;
295 	return dma_map_sg(chan->device->dev, sg, 1, dir);
296 }
297 
298 static void rspi_dma_unmap_sg(struct scatterlist *sg, struct dma_chan *chan,
299 			      enum dma_transfer_direction dir)
300 {
301 	dma_unmap_sg(chan->device->dev, sg, 1, dir);
302 }
303 
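/*
 * Helpers for 16-bit-wide DMAC transfers: expand 8-bit SPI data into 16-bit
 * frames before TX, and compact 16-bit frames back into bytes after RX.
 */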
304 static void rspi_memory_to_8bit(void *buf, const void *data, unsigned len)
305 {
306 	u16 *dst = buf;
307 	const u8 *src = data;
308 
309 	while (len) {
310 		*dst++ = (u16)(*src++);
311 		len--;
312 	}
313 }
314 
315 static void rspi_memory_from_8bit(void *buf, const void *data, unsigned len)
316 {
317 	u8 *dst = buf;
318 	const u16 *src = data;
319 
320 	while (len) {
321 		*dst++ = (u8)*src++;
322 		len--;
323 	}
324 }
325 
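/* DMA transmit: map the TX (or bounce) buffer, submit a slave descriptor and wait up to a second for completion */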
326 static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
327 {
328 	struct scatterlist sg;
329 	void *buf = NULL;
330 	struct dma_async_tx_descriptor *desc;
331 	unsigned len;
332 	int ret = 0;
333 
334 	if (rspi->dma_width_16bit) {
335 		/*
336 		 * If the DMAC bus width is 16 bits, the driver allocates a
337 		 * bounce buffer and expands the original 8-bit data into
338 		 * 16-bit DMAC frames:
339 		 *  original data: 1st byte, 2nd byte ...
340 		 *  DMAC data:     1st byte, dummy, 2nd byte, dummy ...
341 		 */
342 		len = t->len * 2;
343 		buf = kmalloc(len, GFP_KERNEL);
344 		if (!buf)
345 			return -ENOMEM;
346 		rspi_memory_to_8bit(buf, t->tx_buf, t->len);
347 	} else {
348 		len = t->len;
349 		buf = (void *)t->tx_buf;
350 	}
351 
352 	if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE)) {
353 		ret = -EFAULT;
354 		goto end_nomap;
355 	}
356 	desc = dmaengine_prep_slave_sg(rspi->chan_tx, &sg, 1, DMA_TO_DEVICE,
357 				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
358 	if (!desc) {
359 		ret = -EIO;
360 		goto end;
361 	}
362 
363 	/*
364 	 * The DMAC needs SPTIE, but with SPTIE set this IRQ routine would also
365 	 * be called, so the IRQ line is disabled while the DMA transfer runs.
366 	 */
367 	disable_irq(rspi->irq);
368 
369 	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD, RSPI_SPCR);
370 	rspi_enable_irq(rspi, SPCR_SPTIE);
371 	rspi->dma_callbacked = 0;
372 
373 	desc->callback = rspi_dma_complete;
374 	desc->callback_param = rspi;
375 	dmaengine_submit(desc);
376 	dma_async_issue_pending(rspi->chan_tx);
377 
378 	ret = wait_event_interruptible_timeout(rspi->wait,
379 					       rspi->dma_callbacked, HZ);
380 	if (ret > 0 && rspi->dma_callbacked)
381 		ret = 0;
382 	else if (!ret)
383 		ret = -ETIMEDOUT;
384 	rspi_disable_irq(rspi, SPCR_SPTIE);
385 
386 	enable_irq(rspi->irq);
387 
388 end:
389 	rspi_dma_unmap_sg(&sg, rspi->chan_tx, DMA_TO_DEVICE);
390 end_nomap:
391 	if (rspi->dma_width_16bit)
392 		kfree(buf);
393 
394 	return ret;
395 }
396 
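/* Flush a stale receive frame and clear the overrun flag before starting a receive */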
397 static void rspi_receive_init(struct rspi_data *rspi)
398 {
399 	unsigned char spsr;
400 
401 	spsr = rspi_read8(rspi, RSPI_SPSR);
402 	if (spsr & SPSR_SPRF)
403 		rspi_read16(rspi, RSPI_SPDR);	/* dummy read */
404 	if (spsr & SPSR_OVRF)
405 		rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
406 			    RSPI_SPSR);
407 }
408 
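/* PIO receive: a dummy write to SPDR clocks each frame in, then the received byte is read back from SPDR */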
409 static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
410 			    struct spi_transfer *t)
411 {
412 	int remain = t->len;
413 	u8 *data;
414 
415 	rspi_receive_init(rspi);
416 
417 	data = (u8 *)t->rx_buf;
418 	while (remain > 0) {
419 		rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD,
420 			    RSPI_SPCR);
421 
422 		if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
423 			dev_err(&rspi->master->dev,
424 				"%s: tx empty timeout\n", __func__);
425 			return -ETIMEDOUT;
426 		}
427 		/* dummy write to generate the clock */
428 		rspi_write16(rspi, 0x00, RSPI_SPDR);
429 
430 		if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
431 			dev_err(&rspi->master->dev,
432 				"%s: receive timeout\n", __func__);
433 			return -ETIMEDOUT;
434 		}
435 		/* SPDR allows 16 or 32-bit access only */
436 		*data = (u8)rspi_read16(rspi, RSPI_SPDR);
437 
438 		data++;
439 		remain--;
440 	}
441 
442 	return 0;
443 }
444 
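/* DMA receive: a zero-filled dummy TX descriptor generates the clock while the RX channel drains SPDR */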
445 static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
446 {
447 	struct scatterlist sg, sg_dummy;
448 	void *dummy = NULL, *rx_buf = NULL;
449 	struct dma_async_tx_descriptor *desc, *desc_dummy;
450 	unsigned len;
451 	int ret = 0;
452 
453 	if (rspi->dma_width_16bit) {
454 		/*
455 		 * If the DMAC bus width is 16 bits, the driver allocates a
456 		 * bounce buffer and, once the transfer completes, compacts the
457 		 * 16-bit DMAC frames back into the actual 8-bit data:
458 		 *  DMAC data:   1st byte, dummy, 2nd byte, dummy ...
459 		 *  actual data: 1st byte, 2nd byte ...
460 		 */
461 		len = t->len * 2;
462 		rx_buf = kmalloc(len, GFP_KERNEL);
463 		if (!rx_buf)
464 			return -ENOMEM;
465 	} else {
466 		len = t->len;
467 		rx_buf = t->rx_buf;
468 	}
469 
470 	/* prepare dummy transfer to generate SPI clocks */
471 	dummy = kzalloc(len, GFP_KERNEL);
472 	if (!dummy) {
473 		ret = -ENOMEM;
474 		goto end_nomap;
475 	}
476 	if (!rspi_dma_map_sg(&sg_dummy, dummy, len, rspi->chan_tx,
477 			     DMA_TO_DEVICE)) {
478 		ret = -EFAULT;
479 		goto end_nomap;
480 	}
481 	desc_dummy = dmaengine_prep_slave_sg(rspi->chan_tx, &sg_dummy, 1,
482 			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
483 	if (!desc_dummy) {
484 		ret = -EIO;
485 		goto end_dummy_mapped;
486 	}
487 
488 	/* prepare receive transfer */
489 	if (!rspi_dma_map_sg(&sg, rx_buf, len, rspi->chan_rx,
490 			     DMA_FROM_DEVICE)) {
491 		ret = -EFAULT;
492 		goto end_dummy_mapped;
493 
494 	}
495 	desc = dmaengine_prep_slave_sg(rspi->chan_rx, &sg, 1, DMA_FROM_DEVICE,
496 				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
497 	if (!desc) {
498 		ret = -EIO;
499 		goto end;
500 	}
501 
502 	rspi_receive_init(rspi);
503 
504 	/*
505 	 * The DMAC needs SPTIE, but with SPTIE set this IRQ routine would also
506 	 * be called, so the IRQ line is disabled while the DMA transfer runs.
507 	 */
508 	disable_irq(rspi->irq);
509 
510 	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD, RSPI_SPCR);
511 	rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
512 	rspi->dma_callbacked = 0;
513 
514 	desc->callback = rspi_dma_complete;
515 	desc->callback_param = rspi;
516 	dmaengine_submit(desc);
517 	dma_async_issue_pending(rspi->chan_rx);
518 
519 	desc_dummy->callback = NULL;	/* No callback */
520 	dmaengine_submit(desc_dummy);
521 	dma_async_issue_pending(rspi->chan_tx);
522 
523 	ret = wait_event_interruptible_timeout(rspi->wait,
524 					       rspi->dma_callbacked, HZ);
525 	if (ret > 0 && rspi->dma_callbacked)
526 		ret = 0;
527 	else if (!ret)
528 		ret = -ETIMEDOUT;
529 	rspi_disable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
530 
531 	enable_irq(rspi->irq);
532 
533 end:
534 	rspi_dma_unmap_sg(&sg, rspi->chan_rx, DMA_FROM_DEVICE);
535 end_dummy_mapped:
536 	rspi_dma_unmap_sg(&sg_dummy, rspi->chan_tx, DMA_TO_DEVICE);
537 end_nomap:
538 	if (rspi->dma_width_16bit) {
539 		if (!ret)
540 			rspi_memory_from_8bit(t->rx_buf, rx_buf, t->len);
541 		kfree(rx_buf);
542 	}
543 	kfree(dummy);
544 
545 	return ret;
546 }
547 
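/* A transfer may use DMA only if the required dmaengine channels were acquired at probe time */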
548 static int rspi_is_dma(struct rspi_data *rspi, struct spi_transfer *t)
549 {
550 	if (t->tx_buf && rspi->chan_tx)
551 		return 1;
552 	/* Receiving via the DMAC also requires the TX DMAC channel (to generate the clock) */
553 	if (t->rx_buf && rspi->chan_tx && rspi->chan_rx)
554 		return 1;
555 
556 	return 0;
557 }
558 
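/*
 * Workqueue handler: pop messages off rspi->queue under the lock, assert SSL,
 * run each transfer by DMA or PIO, then negate SSL and complete the message.
 */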
559 static void rspi_work(struct work_struct *work)
560 {
561 	struct rspi_data *rspi = container_of(work, struct rspi_data, ws);
562 	struct spi_message *mesg;
563 	struct spi_transfer *t;
564 	unsigned long flags;
565 	int ret;
566 
567 	spin_lock_irqsave(&rspi->lock, flags);
568 	while (!list_empty(&rspi->queue)) {
569 		mesg = list_entry(rspi->queue.next, struct spi_message, queue);
570 		list_del_init(&mesg->queue);
571 		spin_unlock_irqrestore(&rspi->lock, flags);
572 
573 		rspi_assert_ssl(rspi);
574 
575 		list_for_each_entry(t, &mesg->transfers, transfer_list) {
576 			if (t->tx_buf) {
577 				if (rspi_is_dma(rspi, t))
578 					ret = rspi_send_dma(rspi, t);
579 				else
580 					ret = rspi_send_pio(rspi, mesg, t);
581 				if (ret < 0)
582 					goto error;
583 			}
584 			if (t->rx_buf) {
585 				if (rspi_is_dma(rspi, t))
586 					ret = rspi_receive_dma(rspi, t);
587 				else
588 					ret = rspi_receive_pio(rspi, mesg, t);
589 				if (ret < 0)
590 					goto error;
591 			}
592 			mesg->actual_length += t->len;
593 		}
594 		rspi_negate_ssl(rspi);
595 
596 		mesg->status = 0;
597 		mesg->complete(mesg->context);
598 
599 		spin_lock_irqsave(&rspi->lock, flags);
600 	}
601 
602 	return;
603 
604 error:
605 	mesg->status = ret;
606 	mesg->complete(mesg->context);
607 }
608 
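/*
 * spi_master callbacks: setup() latches the device's bit rate and reprograms
 * the controller for 8-bit frames; transfer() queues the message and kicks
 * the work item.
 */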
609 static int rspi_setup(struct spi_device *spi)
610 {
611 	struct rspi_data *rspi = spi_master_get_devdata(spi->master);
612 
613 	if (!spi->bits_per_word)
614 		spi->bits_per_word = 8;
615 	rspi->max_speed_hz = spi->max_speed_hz;
616 
617 	rspi_set_config_register(rspi, 8);
618 
619 	return 0;
620 }
621 
622 static int rspi_transfer(struct spi_device *spi, struct spi_message *mesg)
623 {
624 	struct rspi_data *rspi = spi_master_get_devdata(spi->master);
625 	unsigned long flags;
626 
627 	mesg->actual_length = 0;
628 	mesg->status = -EINPROGRESS;
629 
630 	spin_lock_irqsave(&rspi->lock, flags);
631 	list_add_tail(&mesg->queue, &rspi->queue);
632 	schedule_work(&rspi->ws);
633 	spin_unlock_irqrestore(&rspi->lock, flags);
634 
635 	return 0;
636 }
637 
638 static void rspi_cleanup(struct spi_device *spi)
639 {
640 }
641 
642 static irqreturn_t rspi_irq(int irq, void *_sr)
643 {
644 	struct rspi_data *rspi = (struct rspi_data *)_sr;
645 	unsigned long spsr;
646 	irqreturn_t ret = IRQ_NONE;
647 	unsigned char disable_mask = 0;
648 
649 	rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
650 	if (spsr & SPSR_SPRF)
651 		disable_mask |= SPCR_SPRIE;
652 	if (spsr & SPSR_SPTEF)
653 		disable_mask |= SPCR_SPTIE;
654 
655 	if (disable_mask) {
656 		ret = IRQ_HANDLED;
657 		rspi_disable_irq(rspi, disable_mask);
658 		wake_up(&rspi->wait);
659 	}
660 
661 	return ret;
662 }
663 
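/* Acquire the DMAC channels described by the platform data; both channels are optional */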
664 static int rspi_request_dma(struct rspi_data *rspi,
665 				      struct platform_device *pdev)
666 {
667 	struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
668 	dma_cap_mask_t mask;
669 	struct dma_slave_config cfg;
670 	int ret;
671 
672 	if (!rspi_pd)
673 		return 0;	/* no platform data: fall back to PIO, not an error */
674 
675 	rspi->dma_width_16bit = rspi_pd->dma_width_16bit;
676 
677 	/* Receiving via the DMAC also requires the TX DMAC channel (to generate the clock) */
678 	if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) {
679 		dma_cap_zero(mask);
680 		dma_cap_set(DMA_SLAVE, mask);
681 		rspi->chan_rx = dma_request_channel(mask, shdma_chan_filter,
682 						    (void *)rspi_pd->dma_rx_id);
683 		if (rspi->chan_rx) {
684 			cfg.slave_id = rspi_pd->dma_rx_id;
685 			cfg.direction = DMA_DEV_TO_MEM;
686 			ret = dmaengine_slave_config(rspi->chan_rx, &cfg);
687 			if (!ret)
688 				dev_info(&pdev->dev, "Use DMA when rx.\n");
689 			else
690 				return ret;
691 		}
692 	}
693 	if (rspi_pd->dma_tx_id) {
694 		dma_cap_zero(mask);
695 		dma_cap_set(DMA_SLAVE, mask);
696 		rspi->chan_tx = dma_request_channel(mask, shdma_chan_filter,
697 						    (void *)rspi_pd->dma_tx_id);
698 		if (rspi->chan_tx) {
699 			cfg.slave_id = rspi_pd->dma_tx_id;
700 			cfg.direction = DMA_MEM_TO_DEV;
701 			ret = dmaengine_slave_config(rspi->chan_tx, &cfg);
702 			if (!ret)
703 				dev_info(&pdev->dev, "Using DMA for TX transfers\n");
704 			else
705 				return ret;
706 		}
707 	}
708 
709 	return 0;
710 }
711 
712 static void rspi_release_dma(struct rspi_data *rspi)
713 {
714 	if (rspi->chan_tx)
715 		dma_release_channel(rspi->chan_tx);
716 	if (rspi->chan_rx)
717 		dma_release_channel(rspi->chan_rx);
718 }
719 
720 static int rspi_remove(struct platform_device *pdev)
721 {
722 	struct rspi_data *rspi = dev_get_drvdata(&pdev->dev);
723 
724 	spi_unregister_master(rspi->master);
725 	rspi_release_dma(rspi);
726 	free_irq(platform_get_irq(pdev, 0), rspi);
727 	clk_put(rspi->clk);
728 	iounmap(rspi->addr);
729 	spi_master_put(rspi->master);
730 
731 	return 0;
732 }
733 
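/*
 * Probe: map the register block, look up the "rspiN" clock, request the IRQ,
 * optionally acquire DMA channels, then register the SPI master.
 */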
734 static int rspi_probe(struct platform_device *pdev)
735 {
736 	struct resource *res;
737 	struct spi_master *master;
738 	struct rspi_data *rspi;
739 	int ret, irq;
740 	char clk_name[16];
741 
742 	/* get base addr */
743 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
744 	if (unlikely(res == NULL)) {
745 		dev_err(&pdev->dev, "invalid resource\n");
746 		return -EINVAL;
747 	}
748 
749 	irq = platform_get_irq(pdev, 0);
750 	if (irq < 0) {
751 		dev_err(&pdev->dev, "platform_get_irq error\n");
752 		return -ENODEV;
753 	}
754 
755 	master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
756 	if (master == NULL) {
757 		dev_err(&pdev->dev, "spi_alloc_master error.\n");
758 		return -ENOMEM;
759 	}
760 
761 	rspi = spi_master_get_devdata(master);
762 	dev_set_drvdata(&pdev->dev, rspi);
763 
764 	rspi->master = master;
765 	rspi->addr = ioremap(res->start, resource_size(res));
766 	if (rspi->addr == NULL) {
767 		dev_err(&pdev->dev, "ioremap error.\n");
768 		ret = -ENOMEM;
769 		goto error1;
770 	}
771 
772 	snprintf(clk_name, sizeof(clk_name), "rspi%d", pdev->id);
773 	rspi->clk = clk_get(&pdev->dev, clk_name);
774 	if (IS_ERR(rspi->clk)) {
775 		dev_err(&pdev->dev, "cannot get clock\n");
776 		ret = PTR_ERR(rspi->clk);
777 		goto error2;
778 	}
779 	clk_enable(rspi->clk);
780 
781 	INIT_LIST_HEAD(&rspi->queue);
782 	spin_lock_init(&rspi->lock);
783 	INIT_WORK(&rspi->ws, rspi_work);
784 	init_waitqueue_head(&rspi->wait);
785 
786 	master->num_chipselect = 2;
787 	master->bus_num = pdev->id;
788 	master->setup = rspi_setup;
789 	master->transfer = rspi_transfer;
790 	master->cleanup = rspi_cleanup;
791 
792 	ret = request_irq(irq, rspi_irq, 0, dev_name(&pdev->dev), rspi);
793 	if (ret < 0) {
794 		dev_err(&pdev->dev, "request_irq error\n");
795 		goto error3;
796 	}
797 
798 	rspi->irq = irq;
799 	ret = rspi_request_dma(rspi, pdev);
800 	if (ret < 0) {
801 		dev_err(&pdev->dev, "rspi_request_dma failed.\n");
802 		goto error4;
803 	}
804 
805 	ret = spi_register_master(master);
806 	if (ret < 0) {
807 		dev_err(&pdev->dev, "spi_register_master error.\n");
808 		goto error4;
809 	}
810 
811 	dev_info(&pdev->dev, "probed\n");
812 
813 	return 0;
814 
815 error4:
816 	rspi_release_dma(rspi);
817 	free_irq(irq, rspi);
818 error3:
819 	clk_put(rspi->clk);
820 error2:
821 	iounmap(rspi->addr);
822 error1:
823 	spi_master_put(master);
824 
825 	return ret;
826 }
827 
828 static struct platform_driver rspi_driver = {
829 	.probe =	rspi_probe,
830 	.remove =	rspi_remove,
831 	.driver		= {
832 		.name = "rspi",
833 		.owner	= THIS_MODULE,
834 	},
835 };
836 module_platform_driver(rspi_driver);
837 
838 MODULE_DESCRIPTION("Renesas RSPI bus driver");
839 MODULE_LICENSE("GPL v2");
840 MODULE_AUTHOR("Yoshihiro Shimoda");
841 MODULE_ALIAS("platform:rspi");
842