xref: /openbmc/linux/drivers/spi/spi-omap2-mcspi.c (revision 95e9fd10)
1 /*
2  * OMAP2 McSPI controller driver
3  *
4  * Copyright (C) 2005, 2006 Nokia Corporation
5  * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
6  *		Juha Yrjölä <juha.yrjola@nokia.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21  *
22  */
23 
24 #include <linux/kernel.h>
25 #include <linux/init.h>
26 #include <linux/interrupt.h>
27 #include <linux/module.h>
28 #include <linux/device.h>
29 #include <linux/delay.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/dmaengine.h>
32 #include <linux/omap-dma.h>
33 #include <linux/platform_device.h>
34 #include <linux/err.h>
35 #include <linux/clk.h>
36 #include <linux/io.h>
37 #include <linux/slab.h>
38 #include <linux/pm_runtime.h>
39 #include <linux/of.h>
40 #include <linux/of_device.h>
41 
42 #include <linux/spi/spi.h>
43 
44 #include <plat/clock.h>
45 #include <plat/mcspi.h>
46 
47 #define OMAP2_MCSPI_MAX_FREQ		48000000
48 #define SPI_AUTOSUSPEND_TIMEOUT		2000
49 
50 #define OMAP2_MCSPI_REVISION		0x00
51 #define OMAP2_MCSPI_SYSSTATUS		0x14
52 #define OMAP2_MCSPI_IRQSTATUS		0x18
53 #define OMAP2_MCSPI_IRQENABLE		0x1c
54 #define OMAP2_MCSPI_WAKEUPENABLE	0x20
55 #define OMAP2_MCSPI_SYST		0x24
56 #define OMAP2_MCSPI_MODULCTRL		0x28
57 
58 /* per-channel banks, 0x14 bytes each, first is: */
59 #define OMAP2_MCSPI_CHCONF0		0x2c
60 #define OMAP2_MCSPI_CHSTAT0		0x30
61 #define OMAP2_MCSPI_CHCTRL0		0x34
62 #define OMAP2_MCSPI_TX0			0x38
63 #define OMAP2_MCSPI_RX0			0x3c
64 
65 /* per-register bitmasks: */
66 
67 #define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
68 #define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
69 #define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)
70 
71 #define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
72 #define OMAP2_MCSPI_CHCONF_POL		BIT(1)
73 #define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
74 #define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
75 #define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
76 #define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
77 #define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
78 #define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
79 #define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
80 #define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
81 #define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
82 #define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
83 #define OMAP2_MCSPI_CHCONF_IS		BIT(18)
84 #define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
85 #define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)
86 
87 #define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
88 #define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
89 #define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)
90 
91 #define OMAP2_MCSPI_CHCTRL_EN		BIT(0)
92 
93 #define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)
94 
95 /* We have 2 DMA channels per CS, one for RX and one for TX */
96 struct omap2_mcspi_dma {
97 	struct dma_chan *dma_tx;
98 	struct dma_chan *dma_rx;
99 
100 	int dma_tx_sync_dev;
101 	int dma_rx_sync_dev;
102 
103 	struct completion dma_tx_completion;
104 	struct completion dma_rx_completion;
105 };
106 
107 /* use PIO for small transfers, avoiding DMA setup/teardown overhead and
108  * cache operations; better heuristics consider wordsize and bitrate.
109  */
110 #define DMA_MIN_BYTES			160
111 
112 
113 /*
114  * Used for context save and restore; structure members must be updated
115  * whenever the corresponding registers are modified.
116  */
117 struct omap2_mcspi_regs {
118 	u32 modulctrl;
119 	u32 wakeupenable;
120 	struct list_head cs;
121 };
122 
123 struct omap2_mcspi {
124 	struct spi_master	*master;
125 	/* Virtual base address of the controller */
126 	void __iomem		*base;
127 	unsigned long		phys;
128 	/* SPI1 has 4 channels, while SPI2 has 2 */
129 	struct omap2_mcspi_dma	*dma_channels;
130 	struct device		*dev;
131 	struct omap2_mcspi_regs ctx;
132 };
133 
134 struct omap2_mcspi_cs {
135 	void __iomem		*base;
136 	unsigned long		phys;
137 	int			word_len;
138 	struct list_head	node;
139 	/* Context save and restore shadow register */
140 	u32			chconf0;
141 };
142 
143 #define MOD_REG_BIT(val, mask, set) do { \
144 	if (set) \
145 		val |= mask; \
146 	else \
147 		val &= ~mask; \
148 } while (0)
149 
150 static inline void mcspi_write_reg(struct spi_master *master,
151 		int idx, u32 val)
152 {
153 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
154 
155 	__raw_writel(val, mcspi->base + idx);
156 }
157 
158 static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
159 {
160 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
161 
162 	return __raw_readl(mcspi->base + idx);
163 }
164 
165 static inline void mcspi_write_cs_reg(const struct spi_device *spi,
166 		int idx, u32 val)
167 {
168 	struct omap2_mcspi_cs	*cs = spi->controller_state;
169 
170 	__raw_writel(val, cs->base +  idx);
171 }
172 
173 static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
174 {
175 	struct omap2_mcspi_cs	*cs = spi->controller_state;
176 
177 	return __raw_readl(cs->base + idx);
178 }
179 
180 static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
181 {
182 	struct omap2_mcspi_cs *cs = spi->controller_state;
183 
184 	return cs->chconf0;
185 }
186 
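/*
 * Update the software shadow of CHCONF0 and write it to the channel
 * register; the read back flushes the posted write.
 */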
187 static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
188 {
189 	struct omap2_mcspi_cs *cs = spi->controller_state;
190 
191 	cs->chconf0 = val;
192 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
193 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
194 }
195 
196 static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
197 		int is_read, int enable)
198 {
199 	u32 l, rw;
200 
201 	l = mcspi_cached_chconf0(spi);
202 
203 	if (is_read) /* 1 is read, 0 write */
204 		rw = OMAP2_MCSPI_CHCONF_DMAR;
205 	else
206 		rw = OMAP2_MCSPI_CHCONF_DMAW;
207 
208 	MOD_REG_BIT(l, rw, enable);
209 	mcspi_write_chconf0(spi, l);
210 }
211 
212 static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
213 {
214 	u32 l;
215 
216 	l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
217 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
218 	/* Flush posted writes */
219 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
220 }
221 
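/*
 * Manually assert/deassert the chip select via the CHCONF0 FORCE bit;
 * in single-channel master mode CS is not tied to channel enable.
 */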
222 static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
223 {
224 	u32 l;
225 
226 	l = mcspi_cached_chconf0(spi);
227 	MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active);
228 	mcspi_write_chconf0(spi, l);
229 }
230 
231 static void omap2_mcspi_set_master_mode(struct spi_master *master)
232 {
233 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
234 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
235 	u32 l;
236 
237 	/*
238 	 * Setup when switching from (reset default) slave mode
239 	 * to single-channel master mode
240 	 */
241 	l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
242 	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_STEST, 0);
243 	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0);
244 	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1);
245 	mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
246 
247 	ctx->modulctrl = l;
248 }
249 
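/*
 * Restore MODULCTRL, WAKEUPENABLE and every channel's CHCONF0 from the
 * software shadow; called on runtime resume, when the controller context
 * may have been lost.
 */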
250 static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
251 {
252 	struct spi_master	*spi_cntrl = mcspi->master;
253 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
254 	struct omap2_mcspi_cs	*cs;
255 
256 	/* McSPI: context restore */
257 	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
258 	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
259 
260 	list_for_each_entry(cs, &ctx->cs, node)
261 		__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
262 }
263 static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi)
264 {
265 	pm_runtime_mark_last_busy(mcspi->dev);
266 	pm_runtime_put_autosuspend(mcspi->dev);
267 }
268 
269 static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
270 {
271 	return pm_runtime_get_sync(mcspi->dev);
272 }
273 
274 static int omap2_prepare_transfer(struct spi_master *master)
275 {
276 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
277 
278 	pm_runtime_get_sync(mcspi->dev);
279 	return 0;
280 }
281 
282 static int omap2_unprepare_transfer(struct spi_master *master)
283 {
284 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
285 
286 	pm_runtime_mark_last_busy(mcspi->dev);
287 	pm_runtime_put_autosuspend(mcspi->dev);
288 	return 0;
289 }
290 
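/*
 * Busy-wait (with cpu_relax) for a channel status bit to be set, giving up
 * after about one second; returns 0 on success and -1 on timeout.
 */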
291 static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
292 {
293 	unsigned long timeout;
294 
295 	timeout = jiffies + msecs_to_jiffies(1000);
296 	while (!(__raw_readl(reg) & bit)) {
297 		if (time_after(jiffies, timeout))
298 			return -1;
299 		cpu_relax();
300 	}
301 	return 0;
302 }
303 
304 static void omap2_mcspi_rx_callback(void *data)
305 {
306 	struct spi_device *spi = data;
307 	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
308 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
309 
310 	complete(&mcspi_dma->dma_rx_completion);
311 
312 	/* We must disable the DMA RX request */
313 	omap2_mcspi_set_dma_req(spi, 1, 0);
314 }
315 
316 static void omap2_mcspi_tx_callback(void *data)
317 {
318 	struct spi_device *spi = data;
319 	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
320 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
321 
322 	complete(&mcspi_dma->dma_tx_completion);
323 
324 	/* We must disable the DMA TX request */
325 	omap2_mcspi_set_dma_req(spi, 0, 0);
326 }
327 
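/*
 * DMA transfer path: program the slave config for the channel's TX0/RX0
 * registers, prepare single-entry scatterlists for the (already mapped)
 * buffers, then enable the DMAR/DMAW requests and wait for the dmaengine
 * completions.  The RX transfer is shortened by one word (two in TURBO
 * mode); the remaining word(s) are drained from RX0 by PIO below.
 * Returns the number of bytes transferred.
 */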
328 static unsigned
329 omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
330 {
331 	struct omap2_mcspi	*mcspi;
332 	struct omap2_mcspi_cs	*cs = spi->controller_state;
333 	struct omap2_mcspi_dma  *mcspi_dma;
334 	unsigned int		count;
335 	int			word_len, element_count;
336 	int			elements = 0;
337 	u32			l;
338 	u8			*rx;
339 	const u8		*tx;
340 	void __iomem		*chstat_reg;
341 	struct dma_slave_config	cfg;
342 	enum dma_slave_buswidth width;
343 	unsigned es;
344 
345 	mcspi = spi_master_get_devdata(spi->master);
346 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
347 	l = mcspi_cached_chconf0(spi);
348 
349 	chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
350 
351 	if (cs->word_len <= 8) {
352 		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
353 		es = 1;
354 	} else if (cs->word_len <= 16) {
355 		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
356 		es = 2;
357 	} else {
358 		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
359 		es = 4;
360 	}
361 
362 	memset(&cfg, 0, sizeof(cfg));
363 	cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
364 	cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
365 	cfg.src_addr_width = width;
366 	cfg.dst_addr_width = width;
367 	cfg.src_maxburst = 1;
368 	cfg.dst_maxburst = 1;
369 
370 	if (xfer->tx_buf && mcspi_dma->dma_tx) {
371 		struct dma_async_tx_descriptor *tx;
372 		struct scatterlist sg;
373 
374 		dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
375 
376 		sg_init_table(&sg, 1);
377 		sg_dma_address(&sg) = xfer->tx_dma;
378 		sg_dma_len(&sg) = xfer->len;
379 
380 		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
381 			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
382 		if (tx) {
383 			tx->callback = omap2_mcspi_tx_callback;
384 			tx->callback_param = spi;
385 			dmaengine_submit(tx);
386 		} else {
387 			/* FIXME: fall back to PIO? */
388 		}
389 	}
390 
391 	if (xfer->rx_buf && mcspi_dma->dma_rx) {
392 		struct dma_async_tx_descriptor *tx;
393 		struct scatterlist sg;
394 		size_t len = xfer->len - es;
395 
396 		dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
397 
398 		if (l & OMAP2_MCSPI_CHCONF_TURBO)
399 			len -= es;
400 
401 		sg_init_table(&sg, 1);
402 		sg_dma_address(&sg) = xfer->rx_dma;
403 		sg_dma_len(&sg) = len;
404 
405 		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
406 			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
407 		if (tx) {
408 			tx->callback = omap2_mcspi_rx_callback;
409 			tx->callback_param = spi;
410 			dmaengine_submit(tx);
411 		} else {
412 			/* FIXME: fall back to PIO? */
413 		}
414 	}
415 
416 	count = xfer->len;
417 	word_len = cs->word_len;
418 
419 	rx = xfer->rx_buf;
420 	tx = xfer->tx_buf;
421 
422 	if (word_len <= 8) {
423 		element_count = count;
424 	} else if (word_len <= 16) {
425 		element_count = count >> 1;
426 	} else /* word_len <= 32 */ {
427 		element_count = count >> 2;
428 	}
429 
430 	if (tx != NULL) {
431 		dma_async_issue_pending(mcspi_dma->dma_tx);
432 		omap2_mcspi_set_dma_req(spi, 0, 1);
433 	}
434 
435 	if (rx != NULL) {
436 		dma_async_issue_pending(mcspi_dma->dma_rx);
437 		omap2_mcspi_set_dma_req(spi, 1, 1);
438 	}
439 
440 	if (tx != NULL) {
441 		wait_for_completion(&mcspi_dma->dma_tx_completion);
442 		dma_unmap_single(mcspi->dev, xfer->tx_dma, count,
443 				 DMA_TO_DEVICE);
444 
445 		/* for TX_ONLY mode, be sure all words have shifted out */
446 		if (rx == NULL) {
447 			if (mcspi_wait_for_reg_bit(chstat_reg,
448 						OMAP2_MCSPI_CHSTAT_TXS) < 0)
449 				dev_err(&spi->dev, "TXS timed out\n");
450 			else if (mcspi_wait_for_reg_bit(chstat_reg,
451 						OMAP2_MCSPI_CHSTAT_EOT) < 0)
452 				dev_err(&spi->dev, "EOT timed out\n");
453 		}
454 	}
455 
456 	if (rx != NULL) {
457 		wait_for_completion(&mcspi_dma->dma_rx_completion);
458 		dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
459 				 DMA_FROM_DEVICE);
460 		omap2_mcspi_set_enable(spi, 0);
461 
462 		elements = element_count - 1;
463 
464 		if (l & OMAP2_MCSPI_CHCONF_TURBO) {
465 			elements--;
466 
467 			if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
468 				   & OMAP2_MCSPI_CHSTAT_RXS)) {
469 				u32 w;
470 
471 				w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
472 				if (word_len <= 8)
473 					((u8 *)xfer->rx_buf)[elements++] = w;
474 				else if (word_len <= 16)
475 					((u16 *)xfer->rx_buf)[elements++] = w;
476 				else /* word_len <= 32 */
477 					((u32 *)xfer->rx_buf)[elements++] = w;
478 			} else {
479 				dev_err(&spi->dev,
480 					"DMA RX penultimate word empty\n");
481 				count -= (word_len <= 8)  ? 2 :
482 					(word_len <= 16) ? 4 :
483 					/* word_len <= 32 */ 8;
484 				omap2_mcspi_set_enable(spi, 1);
485 				return count;
486 			}
487 		}
488 
489 		if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
490 				& OMAP2_MCSPI_CHSTAT_RXS)) {
491 			u32 w;
492 
493 			w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
494 			if (word_len <= 8)
495 				((u8 *)xfer->rx_buf)[elements] = w;
496 			else if (word_len <= 16)
497 				((u16 *)xfer->rx_buf)[elements] = w;
498 			else /* word_len <= 32 */
499 				((u32 *)xfer->rx_buf)[elements] = w;
500 		} else {
501 			dev_err(&spi->dev, "DMA RX last word empty\n");
502 			count -= (word_len <= 8)  ? 1 :
503 				 (word_len <= 16) ? 2 :
504 			       /* word_len <= 32 */ 4;
505 		}
506 		omap2_mcspi_set_enable(spi, 1);
507 	}
508 	return count;
509 }
510 
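/*
 * PIO transfer path: poll TXS/RXS in CHSTAT0 and move one word at a time
 * through TX0/RX0, with separate loops for 8-, 16- and 32-bit word lengths.
 * Returns the number of bytes transferred.
 */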
511 static unsigned
512 omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
513 {
514 	struct omap2_mcspi	*mcspi;
515 	struct omap2_mcspi_cs	*cs = spi->controller_state;
516 	unsigned int		count, c;
517 	u32			l;
518 	void __iomem		*base = cs->base;
519 	void __iomem		*tx_reg;
520 	void __iomem		*rx_reg;
521 	void __iomem		*chstat_reg;
522 	int			word_len;
523 
524 	mcspi = spi_master_get_devdata(spi->master);
525 	count = xfer->len;
526 	c = count;
527 	word_len = cs->word_len;
528 
529 	l = mcspi_cached_chconf0(spi);
530 
531 	/* We store the pre-calculated register addresses on stack to speed
532 	 * up the transfer loop. */
533 	tx_reg		= base + OMAP2_MCSPI_TX0;
534 	rx_reg		= base + OMAP2_MCSPI_RX0;
535 	chstat_reg	= base + OMAP2_MCSPI_CHSTAT0;
536 
537 	if (c < (word_len>>3))
538 		return 0;
539 
540 	if (word_len <= 8) {
541 		u8		*rx;
542 		const u8	*tx;
543 
544 		rx = xfer->rx_buf;
545 		tx = xfer->tx_buf;
546 
547 		do {
548 			c -= 1;
549 			if (tx != NULL) {
550 				if (mcspi_wait_for_reg_bit(chstat_reg,
551 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
552 					dev_err(&spi->dev, "TXS timed out\n");
553 					goto out;
554 				}
555 				dev_vdbg(&spi->dev, "write-%d %02x\n",
556 						word_len, *tx);
557 				__raw_writel(*tx++, tx_reg);
558 			}
559 			if (rx != NULL) {
560 				if (mcspi_wait_for_reg_bit(chstat_reg,
561 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
562 					dev_err(&spi->dev, "RXS timed out\n");
563 					goto out;
564 				}
565 
566 				if (c == 1 && tx == NULL &&
567 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
568 					omap2_mcspi_set_enable(spi, 0);
569 					*rx++ = __raw_readl(rx_reg);
570 					dev_vdbg(&spi->dev, "read-%d %02x\n",
571 						    word_len, *(rx - 1));
572 					if (mcspi_wait_for_reg_bit(chstat_reg,
573 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
574 						dev_err(&spi->dev,
575 							"RXS timed out\n");
576 						goto out;
577 					}
578 					c = 0;
579 				} else if (c == 0 && tx == NULL) {
580 					omap2_mcspi_set_enable(spi, 0);
581 				}
582 
583 				*rx++ = __raw_readl(rx_reg);
584 				dev_vdbg(&spi->dev, "read-%d %02x\n",
585 						word_len, *(rx - 1));
586 			}
587 		} while (c);
588 	} else if (word_len <= 16) {
589 		u16		*rx;
590 		const u16	*tx;
591 
592 		rx = xfer->rx_buf;
593 		tx = xfer->tx_buf;
594 		do {
595 			c -= 2;
596 			if (tx != NULL) {
597 				if (mcspi_wait_for_reg_bit(chstat_reg,
598 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
599 					dev_err(&spi->dev, "TXS timed out\n");
600 					goto out;
601 				}
602 				dev_vdbg(&spi->dev, "write-%d %04x\n",
603 						word_len, *tx);
604 				__raw_writel(*tx++, tx_reg);
605 			}
606 			if (rx != NULL) {
607 				if (mcspi_wait_for_reg_bit(chstat_reg,
608 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
609 					dev_err(&spi->dev, "RXS timed out\n");
610 					goto out;
611 				}
612 
613 				if (c == 2 && tx == NULL &&
614 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
615 					omap2_mcspi_set_enable(spi, 0);
616 					*rx++ = __raw_readl(rx_reg);
617 					dev_vdbg(&spi->dev, "read-%d %04x\n",
618 						    word_len, *(rx - 1));
619 					if (mcspi_wait_for_reg_bit(chstat_reg,
620 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
621 						dev_err(&spi->dev,
622 							"RXS timed out\n");
623 						goto out;
624 					}
625 					c = 0;
626 				} else if (c == 0 && tx == NULL) {
627 					omap2_mcspi_set_enable(spi, 0);
628 				}
629 
630 				*rx++ = __raw_readl(rx_reg);
631 				dev_vdbg(&spi->dev, "read-%d %04x\n",
632 						word_len, *(rx - 1));
633 			}
634 		} while (c >= 2);
635 	} else if (word_len <= 32) {
636 		u32		*rx;
637 		const u32	*tx;
638 
639 		rx = xfer->rx_buf;
640 		tx = xfer->tx_buf;
641 		do {
642 			c -= 4;
643 			if (tx != NULL) {
644 				if (mcspi_wait_for_reg_bit(chstat_reg,
645 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
646 					dev_err(&spi->dev, "TXS timed out\n");
647 					goto out;
648 				}
649 				dev_vdbg(&spi->dev, "write-%d %08x\n",
650 						word_len, *tx);
651 				__raw_writel(*tx++, tx_reg);
652 			}
653 			if (rx != NULL) {
654 				if (mcspi_wait_for_reg_bit(chstat_reg,
655 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
656 					dev_err(&spi->dev, "RXS timed out\n");
657 					goto out;
658 				}
659 
660 				if (c == 4 && tx == NULL &&
661 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
662 					omap2_mcspi_set_enable(spi, 0);
663 					*rx++ = __raw_readl(rx_reg);
664 					dev_vdbg(&spi->dev, "read-%d %08x\n",
665 						    word_len, *(rx - 1));
666 					if (mcspi_wait_for_reg_bit(chstat_reg,
667 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
668 						dev_err(&spi->dev,
669 							"RXS timed out\n");
670 						goto out;
671 					}
672 					c = 0;
673 				} else if (c == 0 && tx == NULL) {
674 					omap2_mcspi_set_enable(spi, 0);
675 				}
676 
677 				*rx++ = __raw_readl(rx_reg);
678 				dev_vdbg(&spi->dev, "read-%d %08x\n",
679 						word_len, *(rx - 1));
680 			}
681 		} while (c >= 4);
682 	}
683 
684 	/* for TX_ONLY mode, be sure all words have shifted out */
685 	if (xfer->rx_buf == NULL) {
686 		if (mcspi_wait_for_reg_bit(chstat_reg,
687 				OMAP2_MCSPI_CHSTAT_TXS) < 0) {
688 			dev_err(&spi->dev, "TXS timed out\n");
689 		} else if (mcspi_wait_for_reg_bit(chstat_reg,
690 				OMAP2_MCSPI_CHSTAT_EOT) < 0)
691 			dev_err(&spi->dev, "EOT timed out\n");
692 
693 		/* disable chan to purge rx data received in a TX_ONLY transfer,
694 		 * otherwise this stale rx data will corrupt the directly
695 		 * following RX_ONLY transfer.
696 		 */
697 		omap2_mcspi_set_enable(spi, 0);
698 	}
699 out:
700 	omap2_mcspi_set_enable(spi, 1);
701 	return count - c;
702 }
703 
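/*
 * Pick the smallest power-of-two divider exponent (used in CHCONF0 CLKD)
 * such that 48 MHz >> div does not exceed the requested speed; clamps to
 * the maximum divider of 1 << 15.
 */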
704 static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
705 {
706 	u32 div;
707 
708 	for (div = 0; div < 15; div++)
709 		if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
710 			return div;
711 
712 	return 15;
713 }
714 
715 /* called only when no transfer is active on this device */
716 static int omap2_mcspi_setup_transfer(struct spi_device *spi,
717 		struct spi_transfer *t)
718 {
719 	struct omap2_mcspi_cs *cs = spi->controller_state;
720 	struct omap2_mcspi *mcspi;
721 	struct spi_master *spi_cntrl;
722 	u32 l = 0, div = 0;
723 	u8 word_len = spi->bits_per_word;
724 	u32 speed_hz = spi->max_speed_hz;
725 
726 	mcspi = spi_master_get_devdata(spi->master);
727 	spi_cntrl = mcspi->master;
728 
729 	if (t != NULL && t->bits_per_word)
730 		word_len = t->bits_per_word;
731 
732 	cs->word_len = word_len;
733 
734 	if (t && t->speed_hz)
735 		speed_hz = t->speed_hz;
736 
737 	speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
738 	div = omap2_mcspi_calc_divisor(speed_hz);
739 
740 	l = mcspi_cached_chconf0(spi);
741 
742 	/* standard 4-wire master mode:  SCK, MOSI/out, MISO/in, nCS
743 	 * REVISIT: this controller could support SPI_3WIRE mode.
744 	 */
745 	l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1);
746 	l |= OMAP2_MCSPI_CHCONF_DPE0;
747 
748 	/* wordlength */
749 	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
750 	l |= (word_len - 1) << 7;
751 
752 	/* set chipselect polarity; manage with FORCE */
753 	if (!(spi->mode & SPI_CS_HIGH))
754 		l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
755 	else
756 		l &= ~OMAP2_MCSPI_CHCONF_EPOL;
757 
758 	/* set clock divisor */
759 	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
760 	l |= div << 2;
761 
762 	/* set SPI mode 0..3 */
763 	if (spi->mode & SPI_CPOL)
764 		l |= OMAP2_MCSPI_CHCONF_POL;
765 	else
766 		l &= ~OMAP2_MCSPI_CHCONF_POL;
767 	if (spi->mode & SPI_CPHA)
768 		l |= OMAP2_MCSPI_CHCONF_PHA;
769 	else
770 		l &= ~OMAP2_MCSPI_CHCONF_PHA;
771 
772 	mcspi_write_chconf0(spi, l);
773 
774 	dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
775 			OMAP2_MCSPI_MAX_FREQ >> div,
776 			(spi->mode & SPI_CPHA) ? "trailing" : "leading",
777 			(spi->mode & SPI_CPOL) ? "inverted" : "normal");
778 
779 	return 0;
780 }
781 
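/*
 * Request one dmaengine RX and one TX channel for this chip select, using
 * the sync device numbers taken from the platform DMA resources as the
 * filter parameter for omap_dma_filter_fn.
 */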
782 static int omap2_mcspi_request_dma(struct spi_device *spi)
783 {
784 	struct spi_master	*master = spi->master;
785 	struct omap2_mcspi	*mcspi;
786 	struct omap2_mcspi_dma	*mcspi_dma;
787 	dma_cap_mask_t mask;
788 	unsigned sig;
789 
790 	mcspi = spi_master_get_devdata(master);
791 	mcspi_dma = mcspi->dma_channels + spi->chip_select;
792 
793 	init_completion(&mcspi_dma->dma_rx_completion);
794 	init_completion(&mcspi_dma->dma_tx_completion);
795 
796 	dma_cap_zero(mask);
797 	dma_cap_set(DMA_SLAVE, mask);
798 	sig = mcspi_dma->dma_rx_sync_dev;
799 	mcspi_dma->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
800 	if (!mcspi_dma->dma_rx) {
801 		dev_err(&spi->dev, "no RX DMA engine channel for McSPI\n");
802 		return -EAGAIN;
803 	}
804 
805 	sig = mcspi_dma->dma_tx_sync_dev;
806 	mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
807 	if (!mcspi_dma->dma_tx) {
808 		dev_err(&spi->dev, "no TX DMA engine channel for McSPI\n");
809 		dma_release_channel(mcspi_dma->dma_rx);
810 		mcspi_dma->dma_rx = NULL;
811 		return -EAGAIN;
812 	}
813 
814 	return 0;
815 }
816 
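/*
 * Per-device setup: validate bits_per_word, allocate the per-chip-select
 * state (each channel's register bank sits 0x14 bytes above the previous
 * one), request DMA channels if needed and program CHCONF0 for the
 * device's mode and speed.
 */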
817 static int omap2_mcspi_setup(struct spi_device *spi)
818 {
819 	int			ret;
820 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(spi->master);
821 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
822 	struct omap2_mcspi_dma	*mcspi_dma;
823 	struct omap2_mcspi_cs	*cs = spi->controller_state;
824 
825 	if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
826 		dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
827 			spi->bits_per_word);
828 		return -EINVAL;
829 	}
830 
831 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
832 
833 	if (!cs) {
834 		cs = kzalloc(sizeof *cs, GFP_KERNEL);
835 		if (!cs)
836 			return -ENOMEM;
837 		cs->base = mcspi->base + spi->chip_select * 0x14;
838 		cs->phys = mcspi->phys + spi->chip_select * 0x14;
839 		cs->chconf0 = 0;
840 		spi->controller_state = cs;
841 		/* Link this to context save list */
842 		list_add_tail(&cs->node, &ctx->cs);
843 	}
844 
845 	if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
846 		ret = omap2_mcspi_request_dma(spi);
847 		if (ret < 0)
848 			return ret;
849 	}
850 
851 	ret = omap2_mcspi_enable_clocks(mcspi);
852 	if (ret < 0)
853 		return ret;
854 
855 	ret = omap2_mcspi_setup_transfer(spi, NULL);
856 	omap2_mcspi_disable_clocks(mcspi);
857 
858 	return ret;
859 }
860 
861 static void omap2_mcspi_cleanup(struct spi_device *spi)
862 {
863 	struct omap2_mcspi	*mcspi;
864 	struct omap2_mcspi_dma	*mcspi_dma;
865 	struct omap2_mcspi_cs	*cs;
866 
867 	mcspi = spi_master_get_devdata(spi->master);
868 
869 	if (spi->controller_state) {
870 		/* Unlink controller state from context save list */
871 		cs = spi->controller_state;
872 		list_del(&cs->node);
873 
874 		kfree(cs);
875 	}
876 
877 	if (spi->chip_select < spi->master->num_chipselect) {
878 		mcspi_dma = &mcspi->dma_channels[spi->chip_select];
879 
880 		if (mcspi_dma->dma_rx) {
881 			dma_release_channel(mcspi_dma->dma_rx);
882 			mcspi_dma->dma_rx = NULL;
883 		}
884 		if (mcspi_dma->dma_tx) {
885 			dma_release_channel(mcspi_dma->dma_tx);
886 			mcspi_dma->dma_tx = NULL;
887 		}
888 	}
889 }
890 
891 static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
892 {
893 
894 	/* We only enable one channel at a time -- the one whose message is
895 	 * at the head of the queue -- although this controller would gladly
896 	 * arbitrate among multiple channels.  This corresponds to "single
897 	 * channel" master mode.  As a side effect, we need to manage the
898 	 * chipselect with the FORCE bit ... CS != channel enable.
899 	 */
900 
901 	struct spi_device		*spi;
902 	struct spi_transfer		*t = NULL;
903 	int				cs_active = 0;
904 	struct omap2_mcspi_cs		*cs;
905 	struct omap2_mcspi_device_config *cd;
906 	int				par_override = 0;
907 	int				status = 0;
908 	u32				chconf;
909 
910 	spi = m->spi;
911 	cs = spi->controller_state;
912 	cd = spi->controller_data;
913 
914 	omap2_mcspi_set_enable(spi, 1);
915 	list_for_each_entry(t, &m->transfers, transfer_list) {
916 		if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
917 			status = -EINVAL;
918 			break;
919 		}
920 		if (par_override || t->speed_hz || t->bits_per_word) {
921 			par_override = 1;
922 			status = omap2_mcspi_setup_transfer(spi, t);
923 			if (status < 0)
924 				break;
925 			if (!t->speed_hz && !t->bits_per_word)
926 				par_override = 0;
927 		}
928 
929 		if (!cs_active) {
930 			omap2_mcspi_force_cs(spi, 1);
931 			cs_active = 1;
932 		}
933 
934 		chconf = mcspi_cached_chconf0(spi);
935 		chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
936 		chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
937 
938 		if (t->tx_buf == NULL)
939 			chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
940 		else if (t->rx_buf == NULL)
941 			chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
942 
943 		if (cd && cd->turbo_mode && t->tx_buf == NULL) {
944 			/* Turbo mode is for more than one word */
945 			if (t->len > ((cs->word_len + 7) >> 3))
946 				chconf |= OMAP2_MCSPI_CHCONF_TURBO;
947 		}
948 
949 		mcspi_write_chconf0(spi, chconf);
950 
951 		if (t->len) {
952 			unsigned	count;
953 
954 			/* RX_ONLY mode needs dummy data in TX reg */
955 			if (t->tx_buf == NULL)
956 				__raw_writel(0, cs->base
957 						+ OMAP2_MCSPI_TX0);
958 
959 			if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
960 				count = omap2_mcspi_txrx_dma(spi, t);
961 			else
962 				count = omap2_mcspi_txrx_pio(spi, t);
963 			m->actual_length += count;
964 
965 			if (count != t->len) {
966 				status = -EIO;
967 				break;
968 			}
969 		}
970 
971 		if (t->delay_usecs)
972 			udelay(t->delay_usecs);
973 
974 		/* ignore the "leave it on after last xfer" hint */
975 		if (t->cs_change) {
976 			omap2_mcspi_force_cs(spi, 0);
977 			cs_active = 0;
978 		}
979 	}
980 	/* Restore defaults if they were overridden */
981 	if (par_override) {
982 		par_override = 0;
983 		status = omap2_mcspi_setup_transfer(spi, NULL);
984 	}
985 
986 	if (cs_active)
987 		omap2_mcspi_force_cs(spi, 0);
988 
989 	omap2_mcspi_set_enable(spi, 0);
990 
991 	m->status = status;
992 
993 }
994 
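/*
 * Validate every transfer in the message (speed range, 4..32 bits per
 * word, buffer presence), DMA-map buffers for transfers of at least
 * DMA_MIN_BYTES unless the message is already mapped, then run the
 * message synchronously via omap2_mcspi_work() and finalize it.
 */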
995 static int omap2_mcspi_transfer_one_message(struct spi_master *master,
996 						struct spi_message *m)
997 {
998 	struct omap2_mcspi	*mcspi;
999 	struct spi_transfer	*t;
1000 
1001 	mcspi = spi_master_get_devdata(master);
1002 	m->actual_length = 0;
1003 	m->status = 0;
1004 
1005 	/* reject invalid messages and transfers */
1006 	if (list_empty(&m->transfers))
1007 		return -EINVAL;
1008 	list_for_each_entry(t, &m->transfers, transfer_list) {
1009 		const void	*tx_buf = t->tx_buf;
1010 		void		*rx_buf = t->rx_buf;
1011 		unsigned	len = t->len;
1012 
1013 		if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
1014 				|| (len && !(rx_buf || tx_buf))
1015 				|| (t->bits_per_word &&
1016 					(  t->bits_per_word < 4
1017 					|| t->bits_per_word > 32))) {
1018 			dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
1019 					t->speed_hz,
1020 					len,
1021 					tx_buf ? "tx" : "",
1022 					rx_buf ? "rx" : "",
1023 					t->bits_per_word);
1024 			return -EINVAL;
1025 		}
1026 		if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
1027 			dev_dbg(mcspi->dev, "speed_hz %d below minimum %d Hz\n",
1028 				t->speed_hz,
1029 				OMAP2_MCSPI_MAX_FREQ >> 15);
1030 			return -EINVAL;
1031 		}
1032 
1033 		if (m->is_dma_mapped || len < DMA_MIN_BYTES)
1034 			continue;
1035 
1036 		if (tx_buf != NULL) {
1037 			t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
1038 					len, DMA_TO_DEVICE);
1039 			if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
1040 				dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1041 						'T', len);
1042 				return -EINVAL;
1043 			}
1044 		}
1045 		if (rx_buf != NULL) {
1046 			t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
1047 					DMA_FROM_DEVICE);
1048 			if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
1049 				dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1050 						'R', len);
1051 				if (tx_buf != NULL)
1052 					dma_unmap_single(mcspi->dev, t->tx_dma,
1053 							len, DMA_TO_DEVICE);
1054 				return -EINVAL;
1055 			}
1056 		}
1057 	}
1058 
1059 	omap2_mcspi_work(mcspi, m);
1060 	spi_finalize_current_message(master);
1061 	return 0;
1062 }
1063 
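/*
 * One-time controller init with clocks held via runtime PM: enable the
 * wakeup event and switch the module from its reset-default slave mode to
 * single-channel master mode.
 */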
1064 static int __devinit omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
1065 {
1066 	struct spi_master	*master = mcspi->master;
1067 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1068 	int			ret = 0;
1069 
1070 	ret = omap2_mcspi_enable_clocks(mcspi);
1071 	if (ret < 0)
1072 		return ret;
1073 
1074 	mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
1075 				OMAP2_MCSPI_WAKEUPENABLE_WKEN);
1076 	ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
1077 
1078 	omap2_mcspi_set_master_mode(master);
1079 	omap2_mcspi_disable_clocks(mcspi);
1080 	return 0;
1081 }
1082 
1083 static int omap_mcspi_runtime_resume(struct device *dev)
1084 {
1085 	struct omap2_mcspi	*mcspi;
1086 	struct spi_master	*master;
1087 
1088 	master = dev_get_drvdata(dev);
1089 	mcspi = spi_master_get_devdata(master);
1090 	omap2_mcspi_restore_ctx(mcspi);
1091 
1092 	return 0;
1093 }
1094 
1095 static struct omap2_mcspi_platform_config omap2_pdata = {
1096 	.regs_offset = 0,
1097 };
1098 
1099 static struct omap2_mcspi_platform_config omap4_pdata = {
1100 	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
1101 };
1102 
1103 static const struct of_device_id omap_mcspi_of_match[] = {
1104 	{
1105 		.compatible = "ti,omap2-mcspi",
1106 		.data = &omap2_pdata,
1107 	},
1108 	{
1109 		.compatible = "ti,omap4-mcspi",
1110 		.data = &omap4_pdata,
1111 	},
1112 	{ },
1113 };
1114 MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
1115 
1116 static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
1117 {
1118 	struct spi_master	*master;
1119 	struct omap2_mcspi_platform_config *pdata;
1120 	struct omap2_mcspi	*mcspi;
1121 	struct resource		*r;
1122 	int			status = 0, i;
1123 	u32			regs_offset = 0;
1124 	static int		bus_num = 1;
1125 	struct device_node	*node = pdev->dev.of_node;
1126 	const struct of_device_id *match;
1127 
1128 	master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
1129 	if (master == NULL) {
1130 		dev_dbg(&pdev->dev, "master allocation failed\n");
1131 		return -ENOMEM;
1132 	}
1133 
1134 	/* the spi->mode bits understood by this driver: */
1135 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1136 
1137 	master->setup = omap2_mcspi_setup;
1138 	master->prepare_transfer_hardware = omap2_prepare_transfer;
1139 	master->unprepare_transfer_hardware = omap2_unprepare_transfer;
1140 	master->transfer_one_message = omap2_mcspi_transfer_one_message;
1141 	master->cleanup = omap2_mcspi_cleanup;
1142 	master->dev.of_node = node;
1143 
1144 	match = of_match_device(omap_mcspi_of_match, &pdev->dev);
1145 	if (match) {
1146 		u32 num_cs = 1; /* default number of chipselect */
1147 		pdata = match->data;
1148 
1149 		of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
1150 		master->num_chipselect = num_cs;
1151 		master->bus_num = bus_num++;
1152 	} else {
1153 		pdata = pdev->dev.platform_data;
1154 		master->num_chipselect = pdata->num_cs;
1155 		if (pdev->id != -1)
1156 			master->bus_num = pdev->id;
1157 	}
1158 	regs_offset = pdata->regs_offset;
1159 
1160 	dev_set_drvdata(&pdev->dev, master);
1161 
1162 	mcspi = spi_master_get_devdata(master);
1163 	mcspi->master = master;
1164 
1165 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1166 	if (r == NULL) {
1167 		status = -ENODEV;
1168 		goto free_master;
1169 	}
1170 
1171 	r->start += regs_offset;
1172 	r->end += regs_offset;
1173 	mcspi->phys = r->start;
1174 
1175 	mcspi->base = devm_request_and_ioremap(&pdev->dev, r);
1176 	if (!mcspi->base) {
1177 		dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
1178 		status = -ENOMEM;
1179 		goto free_master;
1180 	}
1181 
1182 	mcspi->dev = &pdev->dev;
1183 
1184 	INIT_LIST_HEAD(&mcspi->ctx.cs);
1185 
1186 	mcspi->dma_channels = kcalloc(master->num_chipselect,
1187 			sizeof(struct omap2_mcspi_dma),
1188 			GFP_KERNEL);
1189 
1190 	if (mcspi->dma_channels == NULL) {
		status = -ENOMEM;
		goto free_master;
	}
1192 
1193 	for (i = 0; i < master->num_chipselect; i++) {
1194 		char dma_ch_name[14];
1195 		struct resource *dma_res;
1196 
1197 		sprintf(dma_ch_name, "rx%d", i);
1198 		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
1199 							dma_ch_name);
1200 		if (!dma_res) {
1201 			dev_dbg(&pdev->dev, "cannot get DMA RX channel\n");
1202 			status = -ENODEV;
1203 			break;
1204 		}
1205 
1206 		mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
1207 		sprintf(dma_ch_name, "tx%d", i);
1208 		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
1209 							dma_ch_name);
1210 		if (!dma_res) {
1211 			dev_dbg(&pdev->dev, "cannot get DMA TX channel\n");
1212 			status = -ENODEV;
1213 			break;
1214 		}
1215 
1216 		mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
1217 	}
1218 
1219 	if (status < 0)
1220 		goto dma_chnl_free;
1221 
1222 	pm_runtime_use_autosuspend(&pdev->dev);
1223 	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
1224 	pm_runtime_enable(&pdev->dev);
1225 
1226 	status = omap2_mcspi_master_setup(mcspi);
1227 	if (status < 0)
		goto disable_pm;
1228 
1229 	status = spi_register_master(master);
1230 	if (status < 0)
1231 		goto disable_pm;
1232 
1233 	return status;
1234 
1235 disable_pm:
1236 	pm_runtime_disable(&pdev->dev);
1237 dma_chnl_free:
1238 	kfree(mcspi->dma_channels);
1239 free_master:
1240 	spi_master_put(master);
1241 	platform_set_drvdata(pdev, NULL);
1242 	return status;
1243 }
1244 
1245 static int __devexit omap2_mcspi_remove(struct platform_device *pdev)
1246 {
1247 	struct spi_master	*master;
1248 	struct omap2_mcspi	*mcspi;
1249 	struct omap2_mcspi_dma	*dma_channels;
1250 
1251 	master = dev_get_drvdata(&pdev->dev);
1252 	mcspi = spi_master_get_devdata(master);
1253 	dma_channels = mcspi->dma_channels;
1254 
1255 	omap2_mcspi_disable_clocks(mcspi);
1256 	pm_runtime_disable(&pdev->dev);
1257 
1258 	spi_unregister_master(master);
1259 	kfree(dma_channels);
1260 	platform_set_drvdata(pdev, NULL);
1261 
1262 	return 0;
1263 }
1264 
1265 /* work with hotplug and coldplug */
1266 MODULE_ALIAS("platform:omap2_mcspi");
1267 
1268 #ifdef	CONFIG_SUSPEND
1269 /*
1270  * When the SPI wakes up from off-mode, CS is in the active state.  If it was
1271  * in the inactive state when the driver was suspended, force it back to the
1272  * inactive state at wake up.
1273  */
1274 static int omap2_mcspi_resume(struct device *dev)
1275 {
1276 	struct spi_master	*master = dev_get_drvdata(dev);
1277 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
1278 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1279 	struct omap2_mcspi_cs	*cs;
1280 
1281 	omap2_mcspi_enable_clocks(mcspi);
1282 	list_for_each_entry(cs, &ctx->cs, node) {
1283 		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
1284 			/*
1285 			 * We need to toggle the CS state for the OMAP to take
1286 			 * this change into account.
1287 			 */
1288 			MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 1);
1289 			__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
1290 			MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 0);
1291 			__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
1292 		}
1293 	}
1294 	omap2_mcspi_disable_clocks(mcspi);
1295 	return 0;
1296 }
1297 #else
1298 #define	omap2_mcspi_resume	NULL
1299 #endif
1300 
1301 static const struct dev_pm_ops omap2_mcspi_pm_ops = {
1302 	.resume = omap2_mcspi_resume,
1303 	.runtime_resume	= omap_mcspi_runtime_resume,
1304 };
1305 
1306 static struct platform_driver omap2_mcspi_driver = {
1307 	.driver = {
1308 		.name =		"omap2_mcspi",
1309 		.owner =	THIS_MODULE,
1310 		.pm =		&omap2_mcspi_pm_ops,
1311 		.of_match_table = omap_mcspi_of_match,
1312 	},
1313 	.probe =	omap2_mcspi_probe,
1314 	.remove =	__devexit_p(omap2_mcspi_remove),
1315 };
1316 
1317 module_platform_driver(omap2_mcspi_driver);
1318 MODULE_LICENSE("GPL");
1319