xref: /openbmc/linux/drivers/spi/spi-omap2-mcspi.c (revision e23feb16)
1 /*
2  * OMAP2 McSPI controller driver
3  *
4  * Copyright (C) 2005, 2006 Nokia Corporation
5  * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
6  *		Juha Yrjölä <juha.yrjola@nokia.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21  *
22  */
23 
24 #include <linux/kernel.h>
25 #include <linux/init.h>
26 #include <linux/interrupt.h>
27 #include <linux/module.h>
28 #include <linux/device.h>
29 #include <linux/delay.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/dmaengine.h>
32 #include <linux/omap-dma.h>
33 #include <linux/platform_device.h>
34 #include <linux/err.h>
35 #include <linux/clk.h>
36 #include <linux/io.h>
37 #include <linux/slab.h>
38 #include <linux/pm_runtime.h>
39 #include <linux/of.h>
40 #include <linux/of_device.h>
41 #include <linux/gcd.h>
42 
43 #include <linux/spi/spi.h>
44 
45 #include <linux/platform_data/spi-omap2-mcspi.h>
46 
47 #define OMAP2_MCSPI_MAX_FREQ		48000000
48 #define OMAP2_MCSPI_MAX_FIFODEPTH	64
49 #define OMAP2_MCSPI_MAX_FIFOWCNT	0xFFFF
50 #define SPI_AUTOSUSPEND_TIMEOUT		2000
51 
52 #define OMAP2_MCSPI_REVISION		0x00
53 #define OMAP2_MCSPI_SYSSTATUS		0x14
54 #define OMAP2_MCSPI_IRQSTATUS		0x18
55 #define OMAP2_MCSPI_IRQENABLE		0x1c
56 #define OMAP2_MCSPI_WAKEUPENABLE	0x20
57 #define OMAP2_MCSPI_SYST		0x24
58 #define OMAP2_MCSPI_MODULCTRL		0x28
59 #define OMAP2_MCSPI_XFERLEVEL		0x7c
60 
61 /* per-channel banks, 0x14 bytes each, first is: */
62 #define OMAP2_MCSPI_CHCONF0		0x2c
63 #define OMAP2_MCSPI_CHSTAT0		0x30
64 #define OMAP2_MCSPI_CHCTRL0		0x34
65 #define OMAP2_MCSPI_TX0			0x38
66 #define OMAP2_MCSPI_RX0			0x3c
67 
68 /* per-register bitmasks: */
69 #define OMAP2_MCSPI_IRQSTATUS_EOW	BIT(17)
70 
71 #define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
72 #define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
73 #define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)
74 
75 #define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
76 #define OMAP2_MCSPI_CHCONF_POL		BIT(1)
77 #define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
78 #define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
79 #define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
80 #define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
81 #define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
82 #define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
83 #define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
84 #define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
85 #define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
86 #define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
87 #define OMAP2_MCSPI_CHCONF_IS		BIT(18)
88 #define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
89 #define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)
90 #define OMAP2_MCSPI_CHCONF_FFET		BIT(27)
91 #define OMAP2_MCSPI_CHCONF_FFER		BIT(28)
92 
93 #define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
94 #define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
95 #define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)
96 #define OMAP2_MCSPI_CHSTAT_TXFFE	BIT(3)
97 
98 #define OMAP2_MCSPI_CHCTRL_EN		BIT(0)
99 
100 #define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)
101 
102 /* We have 2 DMA channels per CS, one for RX and one for TX */
103 struct omap2_mcspi_dma {
104 	struct dma_chan *dma_tx;
105 	struct dma_chan *dma_rx;
106 
107 	int dma_tx_sync_dev;
108 	int dma_rx_sync_dev;
109 
110 	struct completion dma_tx_completion;
111 	struct completion dma_rx_completion;
112 
113 	char dma_rx_ch_name[14];
114 	char dma_tx_ch_name[14];
115 };
116 
117 /* Use PIO for small transfers, avoiding DMA setup/teardown overhead and
118  * cache operations; better heuristics would also consider word size and bit rate.
119  */
120 #define DMA_MIN_BYTES			160
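/* Rough illustration (assumed workloads, not measured here): a 16-byte
 * register read stays well below DMA_MIN_BYTES and is handled by
 * omap2_mcspi_txrx_pio(), while a multi-kilobyte flash page or framebuffer
 * transfer crosses the threshold and goes through omap2_mcspi_txrx_dma(). */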
121 
122 
123 /*
124  * Used for context save and restore, structure members to be updated whenever
125  * corresponding registers are modified.
126  */
127 struct omap2_mcspi_regs {
128 	u32 modulctrl;
129 	u32 wakeupenable;
130 	struct list_head cs;
131 };
132 
133 struct omap2_mcspi {
134 	struct spi_master	*master;
135 	/* Virtual base address of the controller */
136 	void __iomem		*base;
137 	unsigned long		phys;
138 	/* SPI1 has 4 channels, while SPI2 has 2 */
139 	struct omap2_mcspi_dma	*dma_channels;
140 	struct device		*dev;
141 	struct omap2_mcspi_regs ctx;
142 	int			fifo_depth;
143 	unsigned int		pin_dir:1;
144 };
145 
146 struct omap2_mcspi_cs {
147 	void __iomem		*base;
148 	unsigned long		phys;
149 	int			word_len;
150 	struct list_head	node;
151 	/* Context save and restore shadow register */
152 	u32			chconf0;
153 };
154 
155 static inline void mcspi_write_reg(struct spi_master *master,
156 		int idx, u32 val)
157 {
158 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
159 
160 	__raw_writel(val, mcspi->base + idx);
161 }
162 
163 static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
164 {
165 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
166 
167 	return __raw_readl(mcspi->base + idx);
168 }
169 
170 static inline void mcspi_write_cs_reg(const struct spi_device *spi,
171 		int idx, u32 val)
172 {
173 	struct omap2_mcspi_cs	*cs = spi->controller_state;
174 
175 	__raw_writel(val, cs->base +  idx);
176 }
177 
178 static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
179 {
180 	struct omap2_mcspi_cs	*cs = spi->controller_state;
181 
182 	return __raw_readl(cs->base + idx);
183 }
184 
185 static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
186 {
187 	struct omap2_mcspi_cs *cs = spi->controller_state;
188 
189 	return cs->chconf0;
190 }
191 
192 static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
193 {
194 	struct omap2_mcspi_cs *cs = spi->controller_state;
195 
196 	cs->chconf0 = val;
197 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
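	/* Read CHCONF0 back so the posted write has reached the module
	 * before the caller relies on the new configuration. */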
198 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
199 }
200 
201 static inline int mcspi_bytes_per_word(int word_len)
202 {
203 	if (word_len <= 8)
204 		return 1;
205 	else if (word_len <= 16)
206 		return 2;
207 	else /* word_len <= 32 */
208 		return 4;
209 }
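/* Example: bits_per_word = 12 occupies 2 bytes per word in the transfer
 * buffers, matching how the SPI core packs 9..16-bit words. */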
210 
211 static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
212 		int is_read, int enable)
213 {
214 	u32 l, rw;
215 
216 	l = mcspi_cached_chconf0(spi);
217 
218 	if (is_read) /* 1 is read, 0 write */
219 		rw = OMAP2_MCSPI_CHCONF_DMAR;
220 	else
221 		rw = OMAP2_MCSPI_CHCONF_DMAW;
222 
223 	if (enable)
224 		l |= rw;
225 	else
226 		l &= ~rw;
227 
228 	mcspi_write_chconf0(spi, l);
229 }
230 
231 static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
232 {
233 	u32 l;
234 
235 	l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
236 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
237 	/* Flush posted writes */
238 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
239 }
240 
241 static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
242 {
243 	u32 l;
244 
245 	l = mcspi_cached_chconf0(spi);
246 	if (cs_active)
247 		l |= OMAP2_MCSPI_CHCONF_FORCE;
248 	else
249 		l &= ~OMAP2_MCSPI_CHCONF_FORCE;
250 
251 	mcspi_write_chconf0(spi, l);
252 }
253 
254 static void omap2_mcspi_set_master_mode(struct spi_master *master)
255 {
256 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
257 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
258 	u32 l;
259 
260 	/*
261 	 * Setup when switching from (reset default) slave mode
262 	 * to single-channel master mode
263 	 */
264 	l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
265 	l &= ~(OMAP2_MCSPI_MODULCTRL_STEST | OMAP2_MCSPI_MODULCTRL_MS);
266 	l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
267 	mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
268 
269 	ctx->modulctrl = l;
270 }
271 
272 static void omap2_mcspi_set_fifo(const struct spi_device *spi,
273 				struct spi_transfer *t, int enable)
274 {
275 	struct spi_master *master = spi->master;
276 	struct omap2_mcspi_cs *cs = spi->controller_state;
277 	struct omap2_mcspi *mcspi;
278 	unsigned int wcnt;
279 	int fifo_depth, bytes_per_word;
280 	u32 chconf, xferlevel;
281 
282 	mcspi = spi_master_get_devdata(master);
283 
284 	chconf = mcspi_cached_chconf0(spi);
285 	if (enable) {
286 		bytes_per_word = mcspi_bytes_per_word(cs->word_len);
287 		if (t->len % bytes_per_word != 0)
288 			goto disable_fifo;
289 
290 		fifo_depth = gcd(t->len, OMAP2_MCSPI_MAX_FIFODEPTH);
291 		if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0)
292 			goto disable_fifo;
293 
294 		wcnt = t->len / bytes_per_word;
295 		if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
296 			goto disable_fifo;
297 
298 		xferlevel = wcnt << 16;
299 		if (t->rx_buf != NULL) {
300 			chconf |= OMAP2_MCSPI_CHCONF_FFER;
301 			xferlevel |= (fifo_depth - 1) << 8;
302 		} else {
303 			chconf |= OMAP2_MCSPI_CHCONF_FFET;
304 			xferlevel |= fifo_depth - 1;
305 		}
306 
307 		mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
308 		mcspi_write_chconf0(spi, chconf);
309 		mcspi->fifo_depth = fifo_depth;
310 
311 		return;
312 	}
313 
314 disable_fifo:
315 	if (t->rx_buf != NULL)
316 		chconf &= ~OMAP2_MCSPI_CHCONF_FFER;
317 	else
318 		chconf &= ~OMAP2_MCSPI_CHCONF_FFET;
319 
320 	mcspi_write_chconf0(spi, chconf);
321 	mcspi->fifo_depth = 0;
322 }
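/* Worked example (hypothetical transfer, for illustration): with 8-bit words
 * and t->len = 96, gcd(96, 64) = 32, so the FIFO is enabled with a depth of
 * 32 and XFERLEVEL holds WCNT = 96 plus an almost-empty/full level of 31.
 * A 100-byte transfer gives gcd(100, 64) = 4, still >= 2 and word-aligned,
 * so the FIFO is used with a depth of only 4. */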
323 
324 static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
325 {
326 	struct spi_master	*spi_cntrl = mcspi->master;
327 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
328 	struct omap2_mcspi_cs	*cs;
329 
330 	/* McSPI: context restore */
331 	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
332 	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
333 
334 	list_for_each_entry(cs, &ctx->cs, node)
335 		__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
336 }
337 
338 static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
339 {
340 	unsigned long timeout;
341 
342 	timeout = jiffies + msecs_to_jiffies(1000);
343 	while (!(__raw_readl(reg) & bit)) {
344 		if (time_after(jiffies, timeout)) {
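			/* We may have been scheduled away for longer than the
			 * timeout between polls; only report failure if the
			 * bit is still not set. */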
345 			if (!(__raw_readl(reg) & bit))
346 				return -ETIMEDOUT;
347 			else
348 				return 0;
349 		}
350 		cpu_relax();
351 	}
352 	return 0;
353 }
354 
355 static void omap2_mcspi_rx_callback(void *data)
356 {
357 	struct spi_device *spi = data;
358 	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
359 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
360 
361 	/* We must disable the DMA RX request */
362 	omap2_mcspi_set_dma_req(spi, 1, 0);
363 
364 	complete(&mcspi_dma->dma_rx_completion);
365 }
366 
367 static void omap2_mcspi_tx_callback(void *data)
368 {
369 	struct spi_device *spi = data;
370 	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
371 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
372 
373 	/* We must disable the DMA TX request */
374 	omap2_mcspi_set_dma_req(spi, 0, 0);
375 
376 	complete(&mcspi_dma->dma_tx_completion);
377 }
378 
379 static void omap2_mcspi_tx_dma(struct spi_device *spi,
380 				struct spi_transfer *xfer,
381 				struct dma_slave_config cfg)
382 {
383 	struct omap2_mcspi	*mcspi;
384 	struct omap2_mcspi_dma  *mcspi_dma;
385 	unsigned int		count;
386 
387 	mcspi = spi_master_get_devdata(spi->master);
388 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
389 	count = xfer->len;
390 
391 	if (mcspi_dma->dma_tx) {
392 		struct dma_async_tx_descriptor *tx;
393 		struct scatterlist sg;
394 
395 		dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
396 
397 		sg_init_table(&sg, 1);
398 		sg_dma_address(&sg) = xfer->tx_dma;
399 		sg_dma_len(&sg) = xfer->len;
400 
401 		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
402 		DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
403 		if (tx) {
404 			tx->callback = omap2_mcspi_tx_callback;
405 			tx->callback_param = spi;
406 			dmaengine_submit(tx);
407 		} else {
408 			/* FIXME: fall back to PIO? */
409 		}
410 	}
411 	dma_async_issue_pending(mcspi_dma->dma_tx);
412 	omap2_mcspi_set_dma_req(spi, 0, 1);
413 
414 }
415 
416 static unsigned
417 omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
418 				struct dma_slave_config cfg,
419 				unsigned es)
420 {
421 	struct omap2_mcspi	*mcspi;
422 	struct omap2_mcspi_dma  *mcspi_dma;
423 	unsigned int		count, dma_count;
424 	u32			l;
425 	int			elements = 0;
426 	int			word_len, element_count;
427 	struct omap2_mcspi_cs	*cs = spi->controller_state;
428 	mcspi = spi_master_get_devdata(spi->master);
429 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
430 	count = xfer->len;
431 	dma_count = xfer->len;
432 
433 	if (mcspi->fifo_depth == 0)
434 		dma_count -= es;
435 
436 	word_len = cs->word_len;
437 	l = mcspi_cached_chconf0(spi);
438 
439 	if (word_len <= 8)
440 		element_count = count;
441 	else if (word_len <= 16)
442 		element_count = count >> 1;
443 	else /* word_len <= 32 */
444 		element_count = count >> 2;
445 
446 	if (mcspi_dma->dma_rx) {
447 		struct dma_async_tx_descriptor *tx;
448 		struct scatterlist sg;
449 
450 		dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
451 
452 		if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
453 			dma_count -= es;
454 
455 		sg_init_table(&sg, 1);
456 		sg_dma_address(&sg) = xfer->rx_dma;
457 		sg_dma_len(&sg) = dma_count;
458 
459 		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
460 				DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
461 				DMA_CTRL_ACK);
462 		if (tx) {
463 			tx->callback = omap2_mcspi_rx_callback;
464 			tx->callback_param = spi;
465 			dmaengine_submit(tx);
466 		} else {
467 				/* FIXME: fall back to PIO? */
468 		}
469 	}
470 
471 	dma_async_issue_pending(mcspi_dma->dma_rx);
472 	omap2_mcspi_set_dma_req(spi, 1, 1);
473 
474 	wait_for_completion(&mcspi_dma->dma_rx_completion);
475 	dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
476 			 DMA_FROM_DEVICE);
477 
478 	if (mcspi->fifo_depth > 0)
479 		return count;
480 
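	/*
	 * Without the FIFO the DMA above was programmed for one word less
	 * (two less in TURBO mode), so the remaining word(s) are drained by
	 * PIO below with the channel disabled; this keeps the controller
	 * from clocking in further unwanted RX data while we read them.
	 */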
481 	omap2_mcspi_set_enable(spi, 0);
482 
483 	elements = element_count - 1;
484 
485 	if (l & OMAP2_MCSPI_CHCONF_TURBO) {
486 		elements--;
487 
488 		if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
489 				   & OMAP2_MCSPI_CHSTAT_RXS)) {
490 			u32 w;
491 
492 			w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
493 			if (word_len <= 8)
494 				((u8 *)xfer->rx_buf)[elements++] = w;
495 			else if (word_len <= 16)
496 				((u16 *)xfer->rx_buf)[elements++] = w;
497 			else /* word_len <= 32 */
498 				((u32 *)xfer->rx_buf)[elements++] = w;
499 		} else {
500 			int bytes_per_word = mcspi_bytes_per_word(word_len);
501 			dev_err(&spi->dev, "DMA RX penultimate word empty\n");
502 			count -= (bytes_per_word << 1);
503 			omap2_mcspi_set_enable(spi, 1);
504 			return count;
505 		}
506 	}
507 	if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
508 				& OMAP2_MCSPI_CHSTAT_RXS)) {
509 		u32 w;
510 
511 		w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
512 		if (word_len <= 8)
513 			((u8 *)xfer->rx_buf)[elements] = w;
514 		else if (word_len <= 16)
515 			((u16 *)xfer->rx_buf)[elements] = w;
516 		else /* word_len <= 32 */
517 			((u32 *)xfer->rx_buf)[elements] = w;
518 	} else {
519 		dev_err(&spi->dev, "DMA RX last word empty\n");
520 		count -= mcspi_bytes_per_word(word_len);
521 	}
522 	omap2_mcspi_set_enable(spi, 1);
523 	return count;
524 }
525 
526 static unsigned
527 omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
528 {
529 	struct omap2_mcspi	*mcspi;
530 	struct omap2_mcspi_cs	*cs = spi->controller_state;
531 	struct omap2_mcspi_dma  *mcspi_dma;
532 	unsigned int		count;
533 	u32			l;
534 	u8			*rx;
535 	const u8		*tx;
536 	struct dma_slave_config	cfg;
537 	enum dma_slave_buswidth width;
538 	unsigned es;
539 	u32			burst;
540 	void __iomem		*chstat_reg;
541 	void __iomem            *irqstat_reg;
542 	int			wait_res;
543 
544 	mcspi = spi_master_get_devdata(spi->master);
545 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
546 	l = mcspi_cached_chconf0(spi);
547 
548 
549 	if (cs->word_len <= 8) {
550 		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
551 		es = 1;
552 	} else if (cs->word_len <= 16) {
553 		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
554 		es = 2;
555 	} else {
556 		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
557 		es = 4;
558 	}
559 
560 	count = xfer->len;
561 	burst = 1;
562 
563 	if (mcspi->fifo_depth > 0) {
564 		if (count > mcspi->fifo_depth)
565 			burst = mcspi->fifo_depth / es;
566 		else
567 			burst = count / es;
568 	}
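	/* With the FIFO in use, size DMA bursts (in words) to the FIFO
	 * threshold so each DMA request moves up to a full FIFO's worth. */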
569 
570 	memset(&cfg, 0, sizeof(cfg));
571 	cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
572 	cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
573 	cfg.src_addr_width = width;
574 	cfg.dst_addr_width = width;
575 	cfg.src_maxburst = burst;
576 	cfg.dst_maxburst = burst;
577 
578 	rx = xfer->rx_buf;
579 	tx = xfer->tx_buf;
580 
581 	if (tx != NULL)
582 		omap2_mcspi_tx_dma(spi, xfer, cfg);
583 
584 	if (rx != NULL)
585 		count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);
586 
587 	if (tx != NULL) {
588 		wait_for_completion(&mcspi_dma->dma_tx_completion);
589 		dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
590 				 DMA_TO_DEVICE);
591 
592 		if (mcspi->fifo_depth > 0) {
593 			irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
594 
595 			if (mcspi_wait_for_reg_bit(irqstat_reg,
596 						OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
597 				dev_err(&spi->dev, "EOW timed out\n");
598 
599 			mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS,
600 					OMAP2_MCSPI_IRQSTATUS_EOW);
601 		}
602 
603 		/* for TX_ONLY mode, be sure all words have shifted out */
604 		if (rx == NULL) {
605 			chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
606 			if (mcspi->fifo_depth > 0) {
607 				wait_res = mcspi_wait_for_reg_bit(chstat_reg,
608 						OMAP2_MCSPI_CHSTAT_TXFFE);
609 				if (wait_res < 0)
610 					dev_err(&spi->dev, "TXFFE timed out\n");
611 			} else {
612 				wait_res = mcspi_wait_for_reg_bit(chstat_reg,
613 						OMAP2_MCSPI_CHSTAT_TXS);
614 				if (wait_res < 0)
615 					dev_err(&spi->dev, "TXS timed out\n");
616 			}
617 			if (wait_res >= 0 &&
618 				(mcspi_wait_for_reg_bit(chstat_reg,
619 					OMAP2_MCSPI_CHSTAT_EOT) < 0))
620 				dev_err(&spi->dev, "EOT timed out\n");
621 		}
622 	}
623 	return count;
624 }
625 
626 static unsigned
627 omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
628 {
629 	struct omap2_mcspi	*mcspi;
630 	struct omap2_mcspi_cs	*cs = spi->controller_state;
631 	unsigned int		count, c;
632 	u32			l;
633 	void __iomem		*base = cs->base;
634 	void __iomem		*tx_reg;
635 	void __iomem		*rx_reg;
636 	void __iomem		*chstat_reg;
637 	int			word_len;
638 
639 	mcspi = spi_master_get_devdata(spi->master);
640 	count = xfer->len;
641 	c = count;
642 	word_len = cs->word_len;
643 
644 	l = mcspi_cached_chconf0(spi);
645 
646 	/* We store the pre-calculated register addresses on the stack to speed
647 	 * up the transfer loop. */
648 	tx_reg		= base + OMAP2_MCSPI_TX0;
649 	rx_reg		= base + OMAP2_MCSPI_RX0;
650 	chstat_reg	= base + OMAP2_MCSPI_CHSTAT0;
651 
652 	if (c < (word_len>>3))
653 		return 0;
654 
655 	if (word_len <= 8) {
656 		u8		*rx;
657 		const u8	*tx;
658 
659 		rx = xfer->rx_buf;
660 		tx = xfer->tx_buf;
661 
662 		do {
663 			c -= 1;
664 			if (tx != NULL) {
665 				if (mcspi_wait_for_reg_bit(chstat_reg,
666 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
667 					dev_err(&spi->dev, "TXS timed out\n");
668 					goto out;
669 				}
670 				dev_vdbg(&spi->dev, "write-%d %02x\n",
671 						word_len, *tx);
672 				__raw_writel(*tx++, tx_reg);
673 			}
674 			if (rx != NULL) {
675 				if (mcspi_wait_for_reg_bit(chstat_reg,
676 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
677 					dev_err(&spi->dev, "RXS timed out\n");
678 					goto out;
679 				}
680 
681 				if (c == 1 && tx == NULL &&
682 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
683 					omap2_mcspi_set_enable(spi, 0);
684 					*rx++ = __raw_readl(rx_reg);
685 					dev_vdbg(&spi->dev, "read-%d %02x\n",
686 						    word_len, *(rx - 1));
687 					if (mcspi_wait_for_reg_bit(chstat_reg,
688 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
689 						dev_err(&spi->dev,
690 							"RXS timed out\n");
691 						goto out;
692 					}
693 					c = 0;
694 				} else if (c == 0 && tx == NULL) {
695 					omap2_mcspi_set_enable(spi, 0);
696 				}
697 
698 				*rx++ = __raw_readl(rx_reg);
699 				dev_vdbg(&spi->dev, "read-%d %02x\n",
700 						word_len, *(rx - 1));
701 			}
702 		} while (c);
703 	} else if (word_len <= 16) {
704 		u16		*rx;
705 		const u16	*tx;
706 
707 		rx = xfer->rx_buf;
708 		tx = xfer->tx_buf;
709 		do {
710 			c -= 2;
711 			if (tx != NULL) {
712 				if (mcspi_wait_for_reg_bit(chstat_reg,
713 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
714 					dev_err(&spi->dev, "TXS timed out\n");
715 					goto out;
716 				}
717 				dev_vdbg(&spi->dev, "write-%d %04x\n",
718 						word_len, *tx);
719 				__raw_writel(*tx++, tx_reg);
720 			}
721 			if (rx != NULL) {
722 				if (mcspi_wait_for_reg_bit(chstat_reg,
723 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
724 					dev_err(&spi->dev, "RXS timed out\n");
725 					goto out;
726 				}
727 
728 				if (c == 2 && tx == NULL &&
729 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
730 					omap2_mcspi_set_enable(spi, 0);
731 					*rx++ = __raw_readl(rx_reg);
732 					dev_vdbg(&spi->dev, "read-%d %04x\n",
733 						    word_len, *(rx - 1));
734 					if (mcspi_wait_for_reg_bit(chstat_reg,
735 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
736 						dev_err(&spi->dev,
737 							"RXS timed out\n");
738 						goto out;
739 					}
740 					c = 0;
741 				} else if (c == 0 && tx == NULL) {
742 					omap2_mcspi_set_enable(spi, 0);
743 				}
744 
745 				*rx++ = __raw_readl(rx_reg);
746 				dev_vdbg(&spi->dev, "read-%d %04x\n",
747 						word_len, *(rx - 1));
748 			}
749 		} while (c >= 2);
750 	} else if (word_len <= 32) {
751 		u32		*rx;
752 		const u32	*tx;
753 
754 		rx = xfer->rx_buf;
755 		tx = xfer->tx_buf;
756 		do {
757 			c -= 4;
758 			if (tx != NULL) {
759 				if (mcspi_wait_for_reg_bit(chstat_reg,
760 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
761 					dev_err(&spi->dev, "TXS timed out\n");
762 					goto out;
763 				}
764 				dev_vdbg(&spi->dev, "write-%d %08x\n",
765 						word_len, *tx);
766 				__raw_writel(*tx++, tx_reg);
767 			}
768 			if (rx != NULL) {
769 				if (mcspi_wait_for_reg_bit(chstat_reg,
770 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
771 					dev_err(&spi->dev, "RXS timed out\n");
772 					goto out;
773 				}
774 
775 				if (c == 4 && tx == NULL &&
776 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
777 					omap2_mcspi_set_enable(spi, 0);
778 					*rx++ = __raw_readl(rx_reg);
779 					dev_vdbg(&spi->dev, "read-%d %08x\n",
780 						    word_len, *(rx - 1));
781 					if (mcspi_wait_for_reg_bit(chstat_reg,
782 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
783 						dev_err(&spi->dev,
784 							"RXS timed out\n");
785 						goto out;
786 					}
787 					c = 0;
788 				} else if (c == 0 && tx == NULL) {
789 					omap2_mcspi_set_enable(spi, 0);
790 				}
791 
792 				*rx++ = __raw_readl(rx_reg);
793 				dev_vdbg(&spi->dev, "read-%d %08x\n",
794 						word_len, *(rx - 1));
795 			}
796 		} while (c >= 4);
797 	}
798 
799 	/* for TX_ONLY mode, be sure all words have shifted out */
800 	if (xfer->rx_buf == NULL) {
801 		if (mcspi_wait_for_reg_bit(chstat_reg,
802 				OMAP2_MCSPI_CHSTAT_TXS) < 0) {
803 			dev_err(&spi->dev, "TXS timed out\n");
804 		} else if (mcspi_wait_for_reg_bit(chstat_reg,
805 				OMAP2_MCSPI_CHSTAT_EOT) < 0)
806 			dev_err(&spi->dev, "EOT timed out\n");
807 
808 		/* disable the channel to purge RX data received during a
809 		 * TX_ONLY transfer, otherwise this stale RX data would corrupt
810 		 * the immediately following RX_ONLY transfer.
811 		 */
812 		omap2_mcspi_set_enable(spi, 0);
813 	}
814 out:
815 	omap2_mcspi_set_enable(spi, 1);
816 	return count - c;
817 }
818 
819 static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
820 {
821 	u32 div;
822 
823 	for (div = 0; div < 15; div++)
824 		if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
825 			return div;
826 
827 	return 15;
828 }
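/* Example: a requested speed of 10 MHz yields div = 3, i.e. an actual clock
 * of OMAP2_MCSPI_MAX_FREQ >> 3 = 6 MHz; the divisor always rounds the bus
 * clock down so the requested speed is never exceeded. */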
829 
830 /* called only when no transfer is active to this device */
831 static int omap2_mcspi_setup_transfer(struct spi_device *spi,
832 		struct spi_transfer *t)
833 {
834 	struct omap2_mcspi_cs *cs = spi->controller_state;
835 	struct omap2_mcspi *mcspi;
836 	struct spi_master *spi_cntrl;
837 	u32 l = 0, div = 0;
838 	u8 word_len = spi->bits_per_word;
839 	u32 speed_hz = spi->max_speed_hz;
840 
841 	mcspi = spi_master_get_devdata(spi->master);
842 	spi_cntrl = mcspi->master;
843 
844 	if (t != NULL && t->bits_per_word)
845 		word_len = t->bits_per_word;
846 
847 	cs->word_len = word_len;
848 
849 	if (t && t->speed_hz)
850 		speed_hz = t->speed_hz;
851 
852 	speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
853 	div = omap2_mcspi_calc_divisor(speed_hz);
854 
855 	l = mcspi_cached_chconf0(spi);
856 
857 	/* standard 4-wire master mode:  SCK, MOSI/out, MISO/in, nCS
858 	 * REVISIT: this controller could support SPI_3WIRE mode.
859 	 */
860 	if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
861 		l &= ~OMAP2_MCSPI_CHCONF_IS;
862 		l &= ~OMAP2_MCSPI_CHCONF_DPE1;
863 		l |= OMAP2_MCSPI_CHCONF_DPE0;
864 	} else {
865 		l |= OMAP2_MCSPI_CHCONF_IS;
866 		l |= OMAP2_MCSPI_CHCONF_DPE1;
867 		l &= ~OMAP2_MCSPI_CHCONF_DPE0;
868 	}
869 
870 	/* wordlength */
871 	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
872 	l |= (word_len - 1) << 7;
873 
874 	/* set chipselect polarity; manage with FORCE */
875 	if (!(spi->mode & SPI_CS_HIGH))
876 		l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
877 	else
878 		l &= ~OMAP2_MCSPI_CHCONF_EPOL;
879 
880 	/* set clock divisor */
881 	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
882 	l |= div << 2;
883 
884 	/* set SPI mode 0..3 */
885 	if (spi->mode & SPI_CPOL)
886 		l |= OMAP2_MCSPI_CHCONF_POL;
887 	else
888 		l &= ~OMAP2_MCSPI_CHCONF_POL;
889 	if (spi->mode & SPI_CPHA)
890 		l |= OMAP2_MCSPI_CHCONF_PHA;
891 	else
892 		l &= ~OMAP2_MCSPI_CHCONF_PHA;
893 
894 	mcspi_write_chconf0(spi, l);
895 
896 	dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
897 			OMAP2_MCSPI_MAX_FREQ >> div,
898 			(spi->mode & SPI_CPHA) ? "trailing" : "leading",
899 			(spi->mode & SPI_CPOL) ? "inverted" : "normal");
900 
901 	return 0;
902 }
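/* Example (hypothetical slave): a device requesting SPI mode 3, 16-bit words
 * and 12 MHz ends up with CLKD = 2 (48 MHz >> 2 = 12 MHz), WL = 15, and the
 * POL, PHA and EPOL bits set in its CHCONF0 shadow. */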
903 
904 /*
905  * Note that we currently allow DMA only if we get a channel
906  * for both rx and tx. Otherwise we'll do PIO for both rx and tx.
907  */
908 static int omap2_mcspi_request_dma(struct spi_device *spi)
909 {
910 	struct spi_master	*master = spi->master;
911 	struct omap2_mcspi	*mcspi;
912 	struct omap2_mcspi_dma	*mcspi_dma;
913 	dma_cap_mask_t mask;
914 	unsigned sig;
915 
916 	mcspi = spi_master_get_devdata(master);
917 	mcspi_dma = mcspi->dma_channels + spi->chip_select;
918 
919 	init_completion(&mcspi_dma->dma_rx_completion);
920 	init_completion(&mcspi_dma->dma_tx_completion);
921 
922 	dma_cap_zero(mask);
923 	dma_cap_set(DMA_SLAVE, mask);
924 	sig = mcspi_dma->dma_rx_sync_dev;
925 
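	/* dma_request_slave_channel_compat() tries the devicetree "rxN"/"txN"
	 * channel names first and falls back to omap_dma_filter_fn() with the
	 * legacy DMA sync line number for non-DT platforms. */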
926 	mcspi_dma->dma_rx =
927 		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
928 						 &sig, &master->dev,
929 						 mcspi_dma->dma_rx_ch_name);
930 	if (!mcspi_dma->dma_rx)
931 		goto no_dma;
932 
933 	sig = mcspi_dma->dma_tx_sync_dev;
934 	mcspi_dma->dma_tx =
935 		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
936 						 &sig, &master->dev,
937 						 mcspi_dma->dma_tx_ch_name);
938 
939 	if (!mcspi_dma->dma_tx) {
940 		dma_release_channel(mcspi_dma->dma_rx);
941 		mcspi_dma->dma_rx = NULL;
942 		goto no_dma;
943 	}
944 
945 	return 0;
946 
947 no_dma:
948 	dev_warn(&spi->dev, "not using DMA for McSPI\n");
949 	return -EAGAIN;
950 }
951 
952 static int omap2_mcspi_setup(struct spi_device *spi)
953 {
954 	int			ret;
955 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(spi->master);
956 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
957 	struct omap2_mcspi_dma	*mcspi_dma;
958 	struct omap2_mcspi_cs	*cs = spi->controller_state;
959 
960 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
961 
962 	if (!cs) {
963 		cs = kzalloc(sizeof *cs, GFP_KERNEL);
964 		if (!cs)
965 			return -ENOMEM;
966 		cs->base = mcspi->base + spi->chip_select * 0x14;
967 		cs->phys = mcspi->phys + spi->chip_select * 0x14;
968 		cs->chconf0 = 0;
969 		spi->controller_state = cs;
970 		/* Link this to context save list */
971 		list_add_tail(&cs->node, &ctx->cs);
972 	}
973 
974 	if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
975 		ret = omap2_mcspi_request_dma(spi);
976 		if (ret < 0 && ret != -EAGAIN)
977 			return ret;
978 	}
979 
980 	ret = pm_runtime_get_sync(mcspi->dev);
981 	if (ret < 0)
982 		return ret;
983 
984 	ret = omap2_mcspi_setup_transfer(spi, NULL);
985 	pm_runtime_mark_last_busy(mcspi->dev);
986 	pm_runtime_put_autosuspend(mcspi->dev);
987 
988 	return ret;
989 }
990 
991 static void omap2_mcspi_cleanup(struct spi_device *spi)
992 {
993 	struct omap2_mcspi	*mcspi;
994 	struct omap2_mcspi_dma	*mcspi_dma;
995 	struct omap2_mcspi_cs	*cs;
996 
997 	mcspi = spi_master_get_devdata(spi->master);
998 
999 	if (spi->controller_state) {
1000 		/* Unlink controller state from context save list */
1001 		cs = spi->controller_state;
1002 		list_del(&cs->node);
1003 
1004 		kfree(cs);
1005 	}
1006 
1007 	if (spi->chip_select < spi->master->num_chipselect) {
1008 		mcspi_dma = &mcspi->dma_channels[spi->chip_select];
1009 
1010 		if (mcspi_dma->dma_rx) {
1011 			dma_release_channel(mcspi_dma->dma_rx);
1012 			mcspi_dma->dma_rx = NULL;
1013 		}
1014 		if (mcspi_dma->dma_tx) {
1015 			dma_release_channel(mcspi_dma->dma_tx);
1016 			mcspi_dma->dma_tx = NULL;
1017 		}
1018 	}
1019 }
1020 
1021 static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
1022 {
1023 
1024 	/* We only enable one channel at a time -- the one whose message is
1025 	 * currently being handled -- although this controller would gladly
1026 	 * arbitrate among multiple channels.  This corresponds to "single
1027 	 * channel" master mode.  As a side effect, we need to manage the
1028 	 * chipselect with the FORCE bit ... CS != channel enable.
1029 	 */
1030 
1031 	struct spi_device		*spi;
1032 	struct spi_transfer		*t = NULL;
1033 	struct spi_master		*master;
1034 	struct omap2_mcspi_dma		*mcspi_dma;
1035 	int				cs_active = 0;
1036 	struct omap2_mcspi_cs		*cs;
1037 	struct omap2_mcspi_device_config *cd;
1038 	int				par_override = 0;
1039 	int				status = 0;
1040 	u32				chconf;
1041 
1042 	spi = m->spi;
1043 	master = spi->master;
1044 	mcspi_dma = mcspi->dma_channels + spi->chip_select;
1045 	cs = spi->controller_state;
1046 	cd = spi->controller_data;
1047 
1048 	omap2_mcspi_set_enable(spi, 0);
1049 	list_for_each_entry(t, &m->transfers, transfer_list) {
1050 		if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
1051 			status = -EINVAL;
1052 			break;
1053 		}
1054 		if (par_override || t->speed_hz || t->bits_per_word) {
1055 			par_override = 1;
1056 			status = omap2_mcspi_setup_transfer(spi, t);
1057 			if (status < 0)
1058 				break;
1059 			if (!t->speed_hz && !t->bits_per_word)
1060 				par_override = 0;
1061 		}
1062 		if (cd && cd->cs_per_word) {
1063 			chconf = mcspi->ctx.modulctrl;
1064 			chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
1065 			mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
1066 			mcspi->ctx.modulctrl =
1067 				mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
1068 		}
1069 
1070 
1071 		if (!cs_active) {
1072 			omap2_mcspi_force_cs(spi, 1);
1073 			cs_active = 1;
1074 		}
1075 
1076 		chconf = mcspi_cached_chconf0(spi);
1077 		chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
1078 		chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
1079 
1080 		if (t->tx_buf == NULL)
1081 			chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
1082 		else if (t->rx_buf == NULL)
1083 			chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
1084 
1085 		if (cd && cd->turbo_mode && t->tx_buf == NULL) {
1086 			/* Turbo mode is for more than one word */
1087 			if (t->len > ((cs->word_len + 7) >> 3))
1088 				chconf |= OMAP2_MCSPI_CHCONF_TURBO;
1089 		}
1090 
1091 		mcspi_write_chconf0(spi, chconf);
1092 
1093 		if (t->len) {
1094 			unsigned	count;
1095 
1096 			if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
1097 			    (m->is_dma_mapped || t->len >= DMA_MIN_BYTES))
1098 				omap2_mcspi_set_fifo(spi, t, 1);
1099 
1100 			omap2_mcspi_set_enable(spi, 1);
1101 
1102 			/* RX_ONLY mode needs dummy data in TX reg */
1103 			if (t->tx_buf == NULL)
1104 				__raw_writel(0, cs->base
1105 						+ OMAP2_MCSPI_TX0);
1106 
1107 			if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
1108 			    (m->is_dma_mapped || t->len >= DMA_MIN_BYTES))
1109 				count = omap2_mcspi_txrx_dma(spi, t);
1110 			else
1111 				count = omap2_mcspi_txrx_pio(spi, t);
1112 			m->actual_length += count;
1113 
1114 			if (count != t->len) {
1115 				status = -EIO;
1116 				break;
1117 			}
1118 		}
1119 
1120 		if (t->delay_usecs)
1121 			udelay(t->delay_usecs);
1122 
1123 		/* ignore the "leave it on after last xfer" hint */
1124 		if (t->cs_change) {
1125 			omap2_mcspi_force_cs(spi, 0);
1126 			cs_active = 0;
1127 		}
1128 
1129 		omap2_mcspi_set_enable(spi, 0);
1130 
1131 		if (mcspi->fifo_depth > 0)
1132 			omap2_mcspi_set_fifo(spi, t, 0);
1133 	}
1134 	/* Restore defaults if they were overridden */
1135 	if (par_override) {
1136 		par_override = 0;
1137 		status = omap2_mcspi_setup_transfer(spi, NULL);
1138 	}
1139 
1140 	if (cs_active)
1141 		omap2_mcspi_force_cs(spi, 0);
1142 
1143 	if (cd && cd->cs_per_word) {
1144 		chconf = mcspi->ctx.modulctrl;
1145 		chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
1146 		mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
1147 		mcspi->ctx.modulctrl =
1148 			mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
1149 	}
1150 
1151 	omap2_mcspi_set_enable(spi, 0);
1152 
1153 	if (mcspi->fifo_depth > 0 && t)
1154 		omap2_mcspi_set_fifo(spi, t, 0);
1155 
1156 	m->status = status;
1157 }
1158 
1159 static int omap2_mcspi_transfer_one_message(struct spi_master *master,
1160 		struct spi_message *m)
1161 {
1162 	struct spi_device	*spi;
1163 	struct omap2_mcspi	*mcspi;
1164 	struct omap2_mcspi_dma	*mcspi_dma;
1165 	struct spi_transfer	*t;
1166 
1167 	spi = m->spi;
1168 	mcspi = spi_master_get_devdata(master);
1169 	mcspi_dma = mcspi->dma_channels + spi->chip_select;
1170 	m->actual_length = 0;
1171 	m->status = 0;
1172 
1173 	/* reject invalid messages and transfers */
1174 	if (list_empty(&m->transfers))
1175 		return -EINVAL;
1176 	list_for_each_entry(t, &m->transfers, transfer_list) {
1177 		const void	*tx_buf = t->tx_buf;
1178 		void		*rx_buf = t->rx_buf;
1179 		unsigned	len = t->len;
1180 
1181 		if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
1182 				|| (len && !(rx_buf || tx_buf))) {
1183 			dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
1184 					t->speed_hz,
1185 					len,
1186 					tx_buf ? "tx" : "",
1187 					rx_buf ? "rx" : "",
1188 					t->bits_per_word);
1189 			return -EINVAL;
1190 		}
1191 		if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
1192 			dev_dbg(mcspi->dev, "speed_hz %d below minimum %d Hz\n",
1193 					t->speed_hz,
1194 					OMAP2_MCSPI_MAX_FREQ >> 15);
1195 			return -EINVAL;
1196 		}
1197 
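		/* Map buffers for DMA only when the caller has not already
		 * mapped the message and the transfer is large enough to be
		 * worth the setup cost (DMA_MIN_BYTES). */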
1198 		if (m->is_dma_mapped || len < DMA_MIN_BYTES)
1199 			continue;
1200 
1201 		if (mcspi_dma->dma_tx && tx_buf != NULL) {
1202 			t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
1203 					len, DMA_TO_DEVICE);
1204 			if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
1205 				dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1206 						'T', len);
1207 				return -EINVAL;
1208 			}
1209 		}
1210 		if (mcspi_dma->dma_rx && rx_buf != NULL) {
1211 			t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
1212 					DMA_FROM_DEVICE);
1213 			if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
1214 				dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1215 						'R', len);
1216 				if (tx_buf != NULL)
1217 					dma_unmap_single(mcspi->dev, t->tx_dma,
1218 							len, DMA_TO_DEVICE);
1219 				return -EINVAL;
1220 			}
1221 		}
1222 	}
1223 
1224 	omap2_mcspi_work(mcspi, m);
1225 	spi_finalize_current_message(master);
1226 	return 0;
1227 }
1228 
1229 static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
1230 {
1231 	struct spi_master	*master = mcspi->master;
1232 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1233 	int			ret = 0;
1234 
1235 	ret = pm_runtime_get_sync(mcspi->dev);
1236 	if (ret < 0)
1237 		return ret;
1238 
1239 	mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
1240 			OMAP2_MCSPI_WAKEUPENABLE_WKEN);
1241 	ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
1242 
1243 	omap2_mcspi_set_master_mode(master);
1244 	pm_runtime_mark_last_busy(mcspi->dev);
1245 	pm_runtime_put_autosuspend(mcspi->dev);
1246 	return 0;
1247 }
1248 
1249 static int omap_mcspi_runtime_resume(struct device *dev)
1250 {
1251 	struct omap2_mcspi	*mcspi;
1252 	struct spi_master	*master;
1253 
1254 	master = dev_get_drvdata(dev);
1255 	mcspi = spi_master_get_devdata(master);
1256 	omap2_mcspi_restore_ctx(mcspi);
1257 
1258 	return 0;
1259 }
1260 
1261 static struct omap2_mcspi_platform_config omap2_pdata = {
1262 	.regs_offset = 0,
1263 };
1264 
1265 static struct omap2_mcspi_platform_config omap4_pdata = {
1266 	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
1267 };
1268 
1269 static const struct of_device_id omap_mcspi_of_match[] = {
1270 	{
1271 		.compatible = "ti,omap2-mcspi",
1272 		.data = &omap2_pdata,
1273 	},
1274 	{
1275 		.compatible = "ti,omap4-mcspi",
1276 		.data = &omap4_pdata,
1277 	},
1278 	{ },
1279 };
1280 MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
1281 
1282 static int omap2_mcspi_probe(struct platform_device *pdev)
1283 {
1284 	struct spi_master	*master;
1285 	const struct omap2_mcspi_platform_config *pdata;
1286 	struct omap2_mcspi	*mcspi;
1287 	struct resource		*r;
1288 	int			status = 0, i;
1289 	u32			regs_offset = 0;
1290 	static int		bus_num = 1;
1291 	struct device_node	*node = pdev->dev.of_node;
1292 	const struct of_device_id *match;
1293 
1294 	master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
1295 	if (master == NULL) {
1296 		dev_dbg(&pdev->dev, "master allocation failed\n");
1297 		return -ENOMEM;
1298 	}
1299 
1300 	/* the spi->mode bits understood by this driver: */
1301 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1302 	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1303 	master->setup = omap2_mcspi_setup;
1304 	master->auto_runtime_pm = true;
1305 	master->transfer_one_message = omap2_mcspi_transfer_one_message;
1306 	master->cleanup = omap2_mcspi_cleanup;
1307 	master->dev.of_node = node;
1308 
1309 	platform_set_drvdata(pdev, master);
1310 
1311 	mcspi = spi_master_get_devdata(master);
1312 	mcspi->master = master;
1313 
1314 	match = of_match_device(omap_mcspi_of_match, &pdev->dev);
1315 	if (match) {
1316 		u32 num_cs = 1; /* default number of chipselect */
1317 		pdata = match->data;
1318 
1319 		of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
1320 		master->num_chipselect = num_cs;
1321 		master->bus_num = bus_num++;
1322 		if (of_get_property(node, "ti,pindir-d0-out-d1-in", NULL))
1323 			mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
1324 	} else {
1325 		pdata = dev_get_platdata(&pdev->dev);
1326 		master->num_chipselect = pdata->num_cs;
1327 		if (pdev->id != -1)
1328 			master->bus_num = pdev->id;
1329 		mcspi->pin_dir = pdata->pin_dir;
1330 	}
1331 	regs_offset = pdata->regs_offset;
1332 
1333 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1334 	if (r == NULL) {
1335 		status = -ENODEV;
1336 		goto free_master;
1337 	}
1338 
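	/* OMAP4-class modules place the functional registers at a fixed offset
	 * from the IP base; shifting the resource keeps mcspi->base pointing
	 * at OMAP2_MCSPI_REVISION for both register layouts. */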
1339 	r->start += regs_offset;
1340 	r->end += regs_offset;
1341 	mcspi->phys = r->start;
1342 
1343 	mcspi->base = devm_ioremap_resource(&pdev->dev, r);
1344 	if (IS_ERR(mcspi->base)) {
1345 		status = PTR_ERR(mcspi->base);
1346 		goto free_master;
1347 	}
1348 
1349 	mcspi->dev = &pdev->dev;
1350 
1351 	INIT_LIST_HEAD(&mcspi->ctx.cs);
1352 
1353 	mcspi->dma_channels = kcalloc(master->num_chipselect,
1354 			sizeof(struct omap2_mcspi_dma),
1355 			GFP_KERNEL);
1356 
1357 	if (mcspi->dma_channels == NULL)
1358 		goto free_master;
1359 
1360 	for (i = 0; i < master->num_chipselect; i++) {
1361 		char *dma_rx_ch_name = mcspi->dma_channels[i].dma_rx_ch_name;
1362 		char *dma_tx_ch_name = mcspi->dma_channels[i].dma_tx_ch_name;
1363 		struct resource *dma_res;
1364 
1365 		sprintf(dma_rx_ch_name, "rx%d", i);
1366 		if (!pdev->dev.of_node) {
1367 			dma_res =
1368 				platform_get_resource_byname(pdev,
1369 							     IORESOURCE_DMA,
1370 							     dma_rx_ch_name);
1371 			if (!dma_res) {
1372 				dev_dbg(&pdev->dev,
1373 					"cannot get DMA RX channel\n");
1374 				status = -ENODEV;
1375 				break;
1376 			}
1377 
1378 			mcspi->dma_channels[i].dma_rx_sync_dev =
1379 				dma_res->start;
1380 		}
1381 		sprintf(dma_tx_ch_name, "tx%d", i);
1382 		if (!pdev->dev.of_node) {
1383 			dma_res =
1384 				platform_get_resource_byname(pdev,
1385 							     IORESOURCE_DMA,
1386 							     dma_tx_ch_name);
1387 			if (!dma_res) {
1388 				dev_dbg(&pdev->dev,
1389 					"cannot get DMA TX channel\n");
1390 				status = -ENODEV;
1391 				break;
1392 			}
1393 
1394 			mcspi->dma_channels[i].dma_tx_sync_dev =
1395 				dma_res->start;
1396 		}
1397 	}
1398 
1399 	if (status < 0)
1400 		goto dma_chnl_free;
1401 
1402 	pm_runtime_use_autosuspend(&pdev->dev);
1403 	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
1404 	pm_runtime_enable(&pdev->dev);
1405 
1406 	status = omap2_mcspi_master_setup(mcspi);
1407 	if (status < 0)
1408 		goto disable_pm;
1409 
1410 	status = spi_register_master(master);
1411 	if (status < 0)
1412 		goto disable_pm;
1413 
1414 	return status;
1415 
1416 disable_pm:
1417 	pm_runtime_disable(&pdev->dev);
1418 dma_chnl_free:
1419 	kfree(mcspi->dma_channels);
1420 free_master:
1421 	spi_master_put(master);
1422 	return status;
1423 }
1424 
1425 static int omap2_mcspi_remove(struct platform_device *pdev)
1426 {
1427 	struct spi_master	*master;
1428 	struct omap2_mcspi	*mcspi;
1429 	struct omap2_mcspi_dma	*dma_channels;
1430 
1431 	master = platform_get_drvdata(pdev);
1432 	mcspi = spi_master_get_devdata(master);
1433 	dma_channels = mcspi->dma_channels;
1434 
1435 	pm_runtime_put_sync(mcspi->dev);
1436 	pm_runtime_disable(&pdev->dev);
1437 
1438 	spi_unregister_master(master);
1439 	kfree(dma_channels);
1440 
1441 	return 0;
1442 }
1443 
1444 /* work with hotplug and coldplug */
1445 MODULE_ALIAS("platform:omap2_mcspi");
1446 
1447 #ifdef	CONFIG_SUSPEND
1448 /*
1449  * When the SPI controller wakes up from off-mode, CS is in the active
1450  * state.  If it was inactive when the driver was suspended, force it back
1451  * to the inactive state at wake-up.
1452  */
1453 static int omap2_mcspi_resume(struct device *dev)
1454 {
1455 	struct spi_master	*master = dev_get_drvdata(dev);
1456 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
1457 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1458 	struct omap2_mcspi_cs	*cs;
1459 
1460 	pm_runtime_get_sync(mcspi->dev);
1461 	list_for_each_entry(cs, &ctx->cs, node) {
1462 		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
1463 			/*
1464 			 * We need to toggle the CS state for the OMAP to take
1465 			 * this change into account.
1466 			 */
1467 			cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
1468 			__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
1469 			cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
1470 			__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
1471 		}
1472 	}
1473 	pm_runtime_mark_last_busy(mcspi->dev);
1474 	pm_runtime_put_autosuspend(mcspi->dev);
1475 	return 0;
1476 }
1477 #else
1478 #define	omap2_mcspi_resume	NULL
1479 #endif
1480 
1481 static const struct dev_pm_ops omap2_mcspi_pm_ops = {
1482 	.resume = omap2_mcspi_resume,
1483 	.runtime_resume	= omap_mcspi_runtime_resume,
1484 };
1485 
1486 static struct platform_driver omap2_mcspi_driver = {
1487 	.driver = {
1488 		.name =		"omap2_mcspi",
1489 		.owner =	THIS_MODULE,
1490 		.pm =		&omap2_mcspi_pm_ops,
1491 		.of_match_table = omap_mcspi_of_match,
1492 	},
1493 	.probe =	omap2_mcspi_probe,
1494 	.remove =	omap2_mcspi_remove,
1495 };
1496 
1497 module_platform_driver(omap2_mcspi_driver);
1498 MODULE_LICENSE("GPL");
1499