xref: /openbmc/linux/drivers/spi/spi-omap2-mcspi.c (revision 81d67439)
1 /*
2  * OMAP2 McSPI controller driver
3  *
4  * Copyright (C) 2005, 2006 Nokia Corporation
5  * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
6  *		Juha Yrjölä <juha.yrjola@nokia.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21  *
22  */
23 
24 #include <linux/kernel.h>
25 #include <linux/init.h>
26 #include <linux/interrupt.h>
27 #include <linux/module.h>
28 #include <linux/device.h>
29 #include <linux/delay.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/platform_device.h>
32 #include <linux/err.h>
33 #include <linux/clk.h>
34 #include <linux/io.h>
35 #include <linux/slab.h>
36 #include <linux/pm_runtime.h>
37 
38 #include <linux/spi/spi.h>
39 
40 #include <plat/dma.h>
41 #include <plat/clock.h>
42 #include <plat/mcspi.h>
43 
44 #define OMAP2_MCSPI_MAX_FREQ		48000000
45 
46 /* OMAP2 has 3 SPI controllers, while OMAP3 has 4 */
47 #define OMAP2_MCSPI_MAX_CTRL 		4
48 
49 #define OMAP2_MCSPI_REVISION		0x00
50 #define OMAP2_MCSPI_SYSSTATUS		0x14
51 #define OMAP2_MCSPI_IRQSTATUS		0x18
52 #define OMAP2_MCSPI_IRQENABLE		0x1c
53 #define OMAP2_MCSPI_WAKEUPENABLE	0x20
54 #define OMAP2_MCSPI_SYST		0x24
55 #define OMAP2_MCSPI_MODULCTRL		0x28
56 
57 /* per-channel banks, 0x14 bytes each, first is: */
58 #define OMAP2_MCSPI_CHCONF0		0x2c
59 #define OMAP2_MCSPI_CHSTAT0		0x30
60 #define OMAP2_MCSPI_CHCTRL0		0x34
61 #define OMAP2_MCSPI_TX0			0x38
62 #define OMAP2_MCSPI_RX0			0x3c
63 
64 /* per-register bitmasks: */
65 
66 #define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
67 #define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
68 #define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)
69 
70 #define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
71 #define OMAP2_MCSPI_CHCONF_POL		BIT(1)
72 #define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
73 #define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
74 #define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
75 #define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
76 #define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
77 #define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
78 #define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
79 #define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
80 #define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
81 #define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
82 #define OMAP2_MCSPI_CHCONF_IS		BIT(18)
83 #define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
84 #define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)
85 
86 #define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
87 #define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
88 #define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)
89 
90 #define OMAP2_MCSPI_CHCTRL_EN		BIT(0)
91 
92 #define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)
93 
94 /* We have 2 DMA channels per CS, one for RX and one for TX */
95 struct omap2_mcspi_dma {
96 	int dma_tx_channel;
97 	int dma_rx_channel;
98 
99 	int dma_tx_sync_dev;
100 	int dma_rx_sync_dev;
101 
102 	struct completion dma_tx_completion;
103 	struct completion dma_rx_completion;
104 };
105 
106 /* use PIO for small transfers, avoiding DMA setup/teardown overhead and
107  * cache operations; better heuristics consider wordsize and bitrate.
108  */
109 #define DMA_MIN_BYTES			160
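
/*
 * Illustrative sketch of how the threshold is applied (it mirrors the
 * check made in omap2_mcspi_work() further below, not additional logic):
 *
 *	if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
 *		count = omap2_mcspi_txrx_dma(spi, t);
 *	else
 *		count = omap2_mcspi_txrx_pio(spi, t);
 */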
110 
111 
112 struct omap2_mcspi {
113 	struct work_struct	work;
114 	/* lock protects queue and registers */
115 	spinlock_t		lock;
116 	struct list_head	msg_queue;
117 	struct spi_master	*master;
118 	/* Virtual base address of the controller */
119 	void __iomem		*base;
120 	unsigned long		phys;
121 	/* SPI1 has 4 channels, while SPI2 has 2 */
122 	struct omap2_mcspi_dma	*dma_channels;
123 	struct  device		*dev;
124 };
125 
126 struct omap2_mcspi_cs {
127 	void __iomem		*base;
128 	unsigned long		phys;
129 	int			word_len;
130 	struct list_head	node;
131 	/* Context save and restore shadow register */
132 	u32			chconf0;
133 };
134 
135 /* Used for context save and restore; structure members must be updated
136  * whenever the corresponding registers are modified.
137  */
138 struct omap2_mcspi_regs {
139 	u32 modulctrl;
140 	u32 wakeupenable;
141 	struct list_head cs;
142 };
143 
144 static struct omap2_mcspi_regs omap2_mcspi_ctx[OMAP2_MCSPI_MAX_CTRL];
145 
146 static struct workqueue_struct *omap2_mcspi_wq;
147 
148 #define MOD_REG_BIT(val, mask, set) do { \
149 	if (set) \
150 		val |= mask; \
151 	else \
152 		val &= ~mask; \
153 } while (0)
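
/*
 * Usage sketch (illustrative; it mirrors omap2_mcspi_force_cs() below):
 * MOD_REG_BIT() sets or clears one mask in a cached register value, which
 * is then written back through the shadow-register helpers:
 *
 *	u32 l = mcspi_cached_chconf0(spi);
 *
 *	MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, 1);
 *	mcspi_write_chconf0(spi, l);
 */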
154 
155 static inline void mcspi_write_reg(struct spi_master *master,
156 		int idx, u32 val)
157 {
158 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
159 
160 	__raw_writel(val, mcspi->base + idx);
161 }
162 
163 static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
164 {
165 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
166 
167 	return __raw_readl(mcspi->base + idx);
168 }
169 
170 static inline void mcspi_write_cs_reg(const struct spi_device *spi,
171 		int idx, u32 val)
172 {
173 	struct omap2_mcspi_cs	*cs = spi->controller_state;
174 
175 	__raw_writel(val, cs->base + idx);
176 }
177 
178 static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
179 {
180 	struct omap2_mcspi_cs	*cs = spi->controller_state;
181 
182 	return __raw_readl(cs->base + idx);
183 }
184 
185 static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
186 {
187 	struct omap2_mcspi_cs *cs = spi->controller_state;
188 
189 	return cs->chconf0;
190 }
191 
192 static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
193 {
194 	struct omap2_mcspi_cs *cs = spi->controller_state;
195 
196 	cs->chconf0 = val;
197 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
198 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0); /* flush posted write */
199 }
200 
201 static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
202 		int is_read, int enable)
203 {
204 	u32 l, rw;
205 
206 	l = mcspi_cached_chconf0(spi);
207 
208 	if (is_read) /* 1 is read, 0 write */
209 		rw = OMAP2_MCSPI_CHCONF_DMAR;
210 	else
211 		rw = OMAP2_MCSPI_CHCONF_DMAW;
212 
213 	MOD_REG_BIT(l, rw, enable);
214 	mcspi_write_chconf0(spi, l);
215 }
216 
217 static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
218 {
219 	u32 l;
220 
221 	l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
222 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
223 	/* Flush posted writes */
224 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
225 }
226 
227 static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
228 {
229 	u32 l;
230 
231 	l = mcspi_cached_chconf0(spi);
232 	MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active);
233 	mcspi_write_chconf0(spi, l);
234 }
235 
236 static void omap2_mcspi_set_master_mode(struct spi_master *master)
237 {
238 	u32 l;
239 
240 	/* setup when switching from (reset default) slave mode
241 	 * to single-channel master mode
242 	 */
243 	l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
244 	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_STEST, 0);
245 	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0);
246 	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1);
247 	mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
248 
249 	omap2_mcspi_ctx[master->bus_num - 1].modulctrl = l;
250 }
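
/*
 * Illustrative result (assuming no other MODULCTRL bits of interest are
 * set at reset): clearing STEST (bit 3) and MS (bit 2, 0 = master) while
 * setting SINGLE (bit 0) leaves MODULCTRL = 0x1, i.e. single-channel
 * master mode with the system test mode disabled.
 */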
251 
252 static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
253 {
254 	struct spi_master *spi_cntrl;
255 	struct omap2_mcspi_cs *cs;
256 	spi_cntrl = mcspi->master;
257 
258 	/* McSPI: context restore */
259 	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL,
260 			omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl);
261 
262 	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE,
263 			omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable);
264 
265 	list_for_each_entry(cs, &omap2_mcspi_ctx[spi_cntrl->bus_num - 1].cs,
266 			node)
267 		__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
268 }
269 static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi)
270 {
271 	pm_runtime_put_sync(mcspi->dev);
272 }
273 
274 static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
275 {
276 	return pm_runtime_get_sync(mcspi->dev);
277 }
278 
279 static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
280 {
281 	unsigned long timeout;
282 
283 	timeout = jiffies + msecs_to_jiffies(1000);
284 	while (!(__raw_readl(reg) & bit)) {
285 		if (time_after(jiffies, timeout))
286 			return -1;
287 		cpu_relax();
288 	}
289 	return 0;
290 }
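
/*
 * Typical use (illustrative, adapted from the PIO path below): poll a
 * CHSTAT bit until the channel is ready before touching a data register.
 *
 *	if (mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_TXS) < 0)
 *		dev_err(&spi->dev, "TXS timed out\n");
 *	else
 *		__raw_writel(*tx++, tx_reg);
 */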
291 
292 static unsigned
293 omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
294 {
295 	struct omap2_mcspi	*mcspi;
296 	struct omap2_mcspi_cs	*cs = spi->controller_state;
297 	struct omap2_mcspi_dma  *mcspi_dma;
298 	unsigned int		count, c;
299 	unsigned long		base, tx_reg, rx_reg;
300 	int			word_len, data_type, element_count;
301 	int			elements = 0;
302 	u32			l;
303 	u8			*rx;
304 	const u8		*tx;
305 	void __iomem		*chstat_reg;
306 
307 	mcspi = spi_master_get_devdata(spi->master);
308 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
309 	l = mcspi_cached_chconf0(spi);
310 
311 	chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
312 
313 	count = xfer->len;
314 	c = count;
315 	word_len = cs->word_len;
316 
317 	base = cs->phys;
318 	tx_reg = base + OMAP2_MCSPI_TX0;
319 	rx_reg = base + OMAP2_MCSPI_RX0;
320 	rx = xfer->rx_buf;
321 	tx = xfer->tx_buf;
322 
323 	if (word_len <= 8) {
324 		data_type = OMAP_DMA_DATA_TYPE_S8;
325 		element_count = count;
326 	} else if (word_len <= 16) {
327 		data_type = OMAP_DMA_DATA_TYPE_S16;
328 		element_count = count >> 1;
329 	} else /* word_len <= 32 */ {
330 		data_type = OMAP_DMA_DATA_TYPE_S32;
331 		element_count = count >> 2;
332 	}
333 
334 	if (tx != NULL) {
335 		omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel,
336 				data_type, element_count, 1,
337 				OMAP_DMA_SYNC_ELEMENT,
338 				mcspi_dma->dma_tx_sync_dev, 0);
339 
340 		omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0,
341 				OMAP_DMA_AMODE_CONSTANT,
342 				tx_reg, 0, 0);
343 
344 		omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0,
345 				OMAP_DMA_AMODE_POST_INC,
346 				xfer->tx_dma, 0, 0);
347 	}
348 
349 	if (rx != NULL) {
350 		elements = element_count - 1;
351 		if (l & OMAP2_MCSPI_CHCONF_TURBO)
352 			elements--;
353 
354 		omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
355 				data_type, elements, 1,
356 				OMAP_DMA_SYNC_ELEMENT,
357 				mcspi_dma->dma_rx_sync_dev, 1);
358 
359 		omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0,
360 				OMAP_DMA_AMODE_CONSTANT,
361 				rx_reg, 0, 0);
362 
363 		omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0,
364 				OMAP_DMA_AMODE_POST_INC,
365 				xfer->rx_dma, 0, 0);
366 	}
367 
368 	if (tx != NULL) {
369 		omap_start_dma(mcspi_dma->dma_tx_channel);
370 		omap2_mcspi_set_dma_req(spi, 0, 1);
371 	}
372 
373 	if (rx != NULL) {
374 		omap_start_dma(mcspi_dma->dma_rx_channel);
375 		omap2_mcspi_set_dma_req(spi, 1, 1);
376 	}
377 
378 	if (tx != NULL) {
379 		wait_for_completion(&mcspi_dma->dma_tx_completion);
380 		dma_unmap_single(&spi->dev, xfer->tx_dma, count, DMA_TO_DEVICE);
381 
382 		/* for TX_ONLY mode, be sure all words have shifted out */
383 		if (rx == NULL) {
384 			if (mcspi_wait_for_reg_bit(chstat_reg,
385 						OMAP2_MCSPI_CHSTAT_TXS) < 0)
386 				dev_err(&spi->dev, "TXS timed out\n");
387 			else if (mcspi_wait_for_reg_bit(chstat_reg,
388 						OMAP2_MCSPI_CHSTAT_EOT) < 0)
389 				dev_err(&spi->dev, "EOT timed out\n");
390 		}
391 	}
392 
393 	if (rx != NULL) {
394 		wait_for_completion(&mcspi_dma->dma_rx_completion);
395 		dma_unmap_single(&spi->dev, xfer->rx_dma, count, DMA_FROM_DEVICE);
396 		omap2_mcspi_set_enable(spi, 0);
397 
398 		if (l & OMAP2_MCSPI_CHCONF_TURBO) {
399 
400 			if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
401 				   & OMAP2_MCSPI_CHSTAT_RXS)) {
402 				u32 w;
403 
404 				w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
405 				if (word_len <= 8)
406 					((u8 *)xfer->rx_buf)[elements++] = w;
407 				else if (word_len <= 16)
408 					((u16 *)xfer->rx_buf)[elements++] = w;
409 				else /* word_len <= 32 */
410 					((u32 *)xfer->rx_buf)[elements++] = w;
411 			} else {
412 				dev_err(&spi->dev,
413 				"DMA RX penultimate word empty\n");
414 				count -= (word_len <= 8)  ? 2 :
415 					(word_len <= 16) ? 4 :
416 					/* word_len <= 32 */ 8;
417 				omap2_mcspi_set_enable(spi, 1);
418 				return count;
419 			}
420 		}
421 
422 		if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
423 				& OMAP2_MCSPI_CHSTAT_RXS)) {
424 			u32 w;
425 
426 			w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
427 			if (word_len <= 8)
428 				((u8 *)xfer->rx_buf)[elements] = w;
429 			else if (word_len <= 16)
430 				((u16 *)xfer->rx_buf)[elements] = w;
431 			else /* word_len <= 32 */
432 				((u32 *)xfer->rx_buf)[elements] = w;
433 		} else {
434 			dev_err(&spi->dev, "DMA RX last word empty\n");
435 			count -= (word_len <= 8)  ? 1 :
436 				 (word_len <= 16) ? 2 :
437 			       /* word_len <= 32 */ 4;
438 		}
439 		omap2_mcspi_set_enable(spi, 1);
440 	}
441 	return count;
442 }
443 
444 static unsigned
445 omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
446 {
447 	struct omap2_mcspi	*mcspi;
448 	struct omap2_mcspi_cs	*cs = spi->controller_state;
449 	unsigned int		count, c;
450 	u32			l;
451 	void __iomem		*base = cs->base;
452 	void __iomem		*tx_reg;
453 	void __iomem		*rx_reg;
454 	void __iomem		*chstat_reg;
455 	int			word_len;
456 
457 	mcspi = spi_master_get_devdata(spi->master);
458 	count = xfer->len;
459 	c = count;
460 	word_len = cs->word_len;
461 
462 	l = mcspi_cached_chconf0(spi);
463 
464 	/* We store the pre-calculated register addresses on stack to speed
465 	 * up the transfer loop. */
466 	tx_reg		= base + OMAP2_MCSPI_TX0;
467 	rx_reg		= base + OMAP2_MCSPI_RX0;
468 	chstat_reg	= base + OMAP2_MCSPI_CHSTAT0;
469 
470 	if (c < (word_len>>3))
471 		return 0;
472 
473 	if (word_len <= 8) {
474 		u8		*rx;
475 		const u8	*tx;
476 
477 		rx = xfer->rx_buf;
478 		tx = xfer->tx_buf;
479 
480 		do {
481 			c -= 1;
482 			if (tx != NULL) {
483 				if (mcspi_wait_for_reg_bit(chstat_reg,
484 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
485 					dev_err(&spi->dev, "TXS timed out\n");
486 					goto out;
487 				}
488 				dev_vdbg(&spi->dev, "write-%d %02x\n",
489 						word_len, *tx);
490 				__raw_writel(*tx++, tx_reg);
491 			}
492 			if (rx != NULL) {
493 				if (mcspi_wait_for_reg_bit(chstat_reg,
494 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
495 					dev_err(&spi->dev, "RXS timed out\n");
496 					goto out;
497 				}
498 
499 				if (c == 1 && tx == NULL &&
500 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
501 					omap2_mcspi_set_enable(spi, 0);
502 					*rx++ = __raw_readl(rx_reg);
503 					dev_vdbg(&spi->dev, "read-%d %02x\n",
504 						    word_len, *(rx - 1));
505 					if (mcspi_wait_for_reg_bit(chstat_reg,
506 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
507 						dev_err(&spi->dev,
508 							"RXS timed out\n");
509 						goto out;
510 					}
511 					c = 0;
512 				} else if (c == 0 && tx == NULL) {
513 					omap2_mcspi_set_enable(spi, 0);
514 				}
515 
516 				*rx++ = __raw_readl(rx_reg);
517 				dev_vdbg(&spi->dev, "read-%d %02x\n",
518 						word_len, *(rx - 1));
519 			}
520 		} while (c);
521 	} else if (word_len <= 16) {
522 		u16		*rx;
523 		const u16	*tx;
524 
525 		rx = xfer->rx_buf;
526 		tx = xfer->tx_buf;
527 		do {
528 			c -= 2;
529 			if (tx != NULL) {
530 				if (mcspi_wait_for_reg_bit(chstat_reg,
531 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
532 					dev_err(&spi->dev, "TXS timed out\n");
533 					goto out;
534 				}
535 				dev_vdbg(&spi->dev, "write-%d %04x\n",
536 						word_len, *tx);
537 				__raw_writel(*tx++, tx_reg);
538 			}
539 			if (rx != NULL) {
540 				if (mcspi_wait_for_reg_bit(chstat_reg,
541 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
542 					dev_err(&spi->dev, "RXS timed out\n");
543 					goto out;
544 				}
545 
546 				if (c == 2 && tx == NULL &&
547 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
548 					omap2_mcspi_set_enable(spi, 0);
549 					*rx++ = __raw_readl(rx_reg);
550 					dev_vdbg(&spi->dev, "read-%d %04x\n",
551 						    word_len, *(rx - 1));
552 					if (mcspi_wait_for_reg_bit(chstat_reg,
553 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
554 						dev_err(&spi->dev,
555 							"RXS timed out\n");
556 						goto out;
557 					}
558 					c = 0;
559 				} else if (c == 0 && tx == NULL) {
560 					omap2_mcspi_set_enable(spi, 0);
561 				}
562 
563 				*rx++ = __raw_readl(rx_reg);
564 				dev_vdbg(&spi->dev, "read-%d %04x\n",
565 						word_len, *(rx - 1));
566 			}
567 		} while (c >= 2);
568 	} else if (word_len <= 32) {
569 		u32		*rx;
570 		const u32	*tx;
571 
572 		rx = xfer->rx_buf;
573 		tx = xfer->tx_buf;
574 		do {
575 			c -= 4;
576 			if (tx != NULL) {
577 				if (mcspi_wait_for_reg_bit(chstat_reg,
578 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
579 					dev_err(&spi->dev, "TXS timed out\n");
580 					goto out;
581 				}
582 				dev_vdbg(&spi->dev, "write-%d %08x\n",
583 						word_len, *tx);
584 				__raw_writel(*tx++, tx_reg);
585 			}
586 			if (rx != NULL) {
587 				if (mcspi_wait_for_reg_bit(chstat_reg,
588 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
589 					dev_err(&spi->dev, "RXS timed out\n");
590 					goto out;
591 				}
592 
593 				if (c == 4 && tx == NULL &&
594 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
595 					omap2_mcspi_set_enable(spi, 0);
596 					*rx++ = __raw_readl(rx_reg);
597 					dev_vdbg(&spi->dev, "read-%d %08x\n",
598 						    word_len, *(rx - 1));
599 					if (mcspi_wait_for_reg_bit(chstat_reg,
600 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
601 						dev_err(&spi->dev,
602 							"RXS timed out\n");
603 						goto out;
604 					}
605 					c = 0;
606 				} else if (c == 0 && tx == NULL) {
607 					omap2_mcspi_set_enable(spi, 0);
608 				}
609 
610 				*rx++ = __raw_readl(rx_reg);
611 				dev_vdbg(&spi->dev, "read-%d %08x\n",
612 						word_len, *(rx - 1));
613 			}
614 		} while (c >= 4);
615 	}
616 
617 	/* for TX_ONLY mode, be sure all words have shifted out */
618 	if (xfer->rx_buf == NULL) {
619 		if (mcspi_wait_for_reg_bit(chstat_reg,
620 				OMAP2_MCSPI_CHSTAT_TXS) < 0) {
621 			dev_err(&spi->dev, "TXS timed out\n");
622 		} else if (mcspi_wait_for_reg_bit(chstat_reg,
623 				OMAP2_MCSPI_CHSTAT_EOT) < 0)
624 			dev_err(&spi->dev, "EOT timed out\n");
625 
626 		/* disable the channel to purge RX data received during this
627 		 * TX_ONLY transfer; otherwise the stale data would corrupt the
628 		 * immediately following RX_ONLY transfer.
629 		 */
630 		omap2_mcspi_set_enable(spi, 0);
631 	}
632 out:
633 	omap2_mcspi_set_enable(spi, 1);
634 	return count - c;
635 }
636 
637 static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
638 {
639 	u32 div;
640 
641 	for (div = 0; div < 15; div++)
642 		if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
643 			return div;
644 
645 	return 15;
646 }
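
/*
 * Worked example (illustrative): with OMAP2_MCSPI_MAX_FREQ at 48 MHz, a
 * requested 6 MHz yields div = 3 (48 MHz >> 3 == 6 MHz); any rate below
 * 48 MHz >> 14 (~2.9 kHz) falls through to the maximum divisor of 15.
 */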
647 
648 /* called only when no transfer is active on this device */
649 static int omap2_mcspi_setup_transfer(struct spi_device *spi,
650 		struct spi_transfer *t)
651 {
652 	struct omap2_mcspi_cs *cs = spi->controller_state;
653 	struct omap2_mcspi *mcspi;
654 	struct spi_master *spi_cntrl;
655 	u32 l = 0, div = 0;
656 	u8 word_len = spi->bits_per_word;
657 	u32 speed_hz = spi->max_speed_hz;
658 
659 	mcspi = spi_master_get_devdata(spi->master);
660 	spi_cntrl = mcspi->master;
661 
662 	if (t != NULL && t->bits_per_word)
663 		word_len = t->bits_per_word;
664 
665 	cs->word_len = word_len;
666 
667 	if (t && t->speed_hz)
668 		speed_hz = t->speed_hz;
669 
670 	speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
671 	div = omap2_mcspi_calc_divisor(speed_hz);
672 
673 	l = mcspi_cached_chconf0(spi);
674 
675 	/* standard 4-wire master mode:  SCK, MOSI/out, MISO/in, nCS
676 	 * REVISIT: this controller could support SPI_3WIRE mode.
677 	 */
678 	l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1);
679 	l |= OMAP2_MCSPI_CHCONF_DPE0;
680 
681 	/* wordlength */
682 	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
683 	l |= (word_len - 1) << 7;
684 
685 	/* set chipselect polarity; manage with FORCE */
686 	if (!(spi->mode & SPI_CS_HIGH))
687 		l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
688 	else
689 		l &= ~OMAP2_MCSPI_CHCONF_EPOL;
690 
691 	/* set clock divisor */
692 	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
693 	l |= div << 2;
694 
695 	/* set SPI mode 0..3 */
696 	if (spi->mode & SPI_CPOL)
697 		l |= OMAP2_MCSPI_CHCONF_POL;
698 	else
699 		l &= ~OMAP2_MCSPI_CHCONF_POL;
700 	if (spi->mode & SPI_CPHA)
701 		l |= OMAP2_MCSPI_CHCONF_PHA;
702 	else
703 		l &= ~OMAP2_MCSPI_CHCONF_PHA;
704 
705 	mcspi_write_chconf0(spi, l);
706 
707 	dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
708 			OMAP2_MCSPI_MAX_FREQ >> div,
709 			(spi->mode & SPI_CPHA) ? "trailing" : "leading",
710 			(spi->mode & SPI_CPOL) ? "inverted" : "normal");
711 
712 	return 0;
713 }
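
/*
 * Worked example (illustrative, assuming the cached chconf0 starts at 0):
 * for SPI mode 0, 8-bit words, an active-low chip select and a 6 MHz
 * clock, the code above builds CHCONF0 as
 *
 *	DPE0 | EPOL | (7 << 7) | (3 << 2)
 *	= 0x10000 | 0x40 | 0x380 | 0xc = 0x103cc
 *
 * with POL and PHA left clear.
 */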
714 
715 static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
716 {
717 	struct spi_device	*spi = data;
718 	struct omap2_mcspi	*mcspi;
719 	struct omap2_mcspi_dma	*mcspi_dma;
720 
721 	mcspi = spi_master_get_devdata(spi->master);
722 	mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
723 
724 	complete(&mcspi_dma->dma_rx_completion);
725 
726 	/* We must disable the DMA RX request */
727 	omap2_mcspi_set_dma_req(spi, 1, 0);
728 }
729 
730 static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
731 {
732 	struct spi_device	*spi = data;
733 	struct omap2_mcspi	*mcspi;
734 	struct omap2_mcspi_dma	*mcspi_dma;
735 
736 	mcspi = spi_master_get_devdata(spi->master);
737 	mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
738 
739 	complete(&mcspi_dma->dma_tx_completion);
740 
741 	/* We must disable the DMA TX request */
742 	omap2_mcspi_set_dma_req(spi, 0, 0);
743 }
744 
745 static int omap2_mcspi_request_dma(struct spi_device *spi)
746 {
747 	struct spi_master	*master = spi->master;
748 	struct omap2_mcspi	*mcspi;
749 	struct omap2_mcspi_dma	*mcspi_dma;
750 
751 	mcspi = spi_master_get_devdata(master);
752 	mcspi_dma = mcspi->dma_channels + spi->chip_select;
753 
754 	if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX",
755 			omap2_mcspi_dma_rx_callback, spi,
756 			&mcspi_dma->dma_rx_channel)) {
757 		dev_err(&spi->dev, "no RX DMA channel for McSPI\n");
758 		return -EAGAIN;
759 	}
760 
761 	if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX",
762 			omap2_mcspi_dma_tx_callback, spi,
763 			&mcspi_dma->dma_tx_channel)) {
764 		omap_free_dma(mcspi_dma->dma_rx_channel);
765 		mcspi_dma->dma_rx_channel = -1;
766 		dev_err(&spi->dev, "no TX DMA channel for McSPI\n");
767 		return -EAGAIN;
768 	}
769 
770 	init_completion(&mcspi_dma->dma_rx_completion);
771 	init_completion(&mcspi_dma->dma_tx_completion);
772 
773 	return 0;
774 }
775 
776 static int omap2_mcspi_setup(struct spi_device *spi)
777 {
778 	int			ret;
779 	struct omap2_mcspi	*mcspi;
780 	struct omap2_mcspi_dma	*mcspi_dma;
781 	struct omap2_mcspi_cs	*cs = spi->controller_state;
782 
783 	if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
784 		dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
785 			spi->bits_per_word);
786 		return -EINVAL;
787 	}
788 
789 	mcspi = spi_master_get_devdata(spi->master);
790 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
791 
792 	if (!cs) {
793 		cs = kzalloc(sizeof *cs, GFP_KERNEL);
794 		if (!cs)
795 			return -ENOMEM;
796 		cs->base = mcspi->base + spi->chip_select * 0x14;
797 		cs->phys = mcspi->phys + spi->chip_select * 0x14;
798 		cs->chconf0 = 0;
799 		spi->controller_state = cs;
800 		/* Link this to context save list */
801 		list_add_tail(&cs->node,
802 			&omap2_mcspi_ctx[mcspi->master->bus_num - 1].cs);
803 	}
804 
805 	if (mcspi_dma->dma_rx_channel == -1
806 			|| mcspi_dma->dma_tx_channel == -1) {
807 		ret = omap2_mcspi_request_dma(spi);
808 		if (ret < 0)
809 			return ret;
810 	}
811 
812 	ret = omap2_mcspi_enable_clocks(mcspi);
813 	if (ret < 0)
814 		return ret;
815 
816 	ret = omap2_mcspi_setup_transfer(spi, NULL);
817 	omap2_mcspi_disable_clocks(mcspi);
818 
819 	return ret;
820 }
821 
822 static void omap2_mcspi_cleanup(struct spi_device *spi)
823 {
824 	struct omap2_mcspi	*mcspi;
825 	struct omap2_mcspi_dma	*mcspi_dma;
826 	struct omap2_mcspi_cs	*cs;
827 
828 	mcspi = spi_master_get_devdata(spi->master);
829 
830 	if (spi->controller_state) {
831 		/* Unlink controller state from context save list */
832 		cs = spi->controller_state;
833 		list_del(&cs->node);
834 
835 		kfree(spi->controller_state);
836 	}
837 
838 	if (spi->chip_select < spi->master->num_chipselect) {
839 		mcspi_dma = &mcspi->dma_channels[spi->chip_select];
840 
841 		if (mcspi_dma->dma_rx_channel != -1) {
842 			omap_free_dma(mcspi_dma->dma_rx_channel);
843 			mcspi_dma->dma_rx_channel = -1;
844 		}
845 		if (mcspi_dma->dma_tx_channel != -1) {
846 			omap_free_dma(mcspi_dma->dma_tx_channel);
847 			mcspi_dma->dma_tx_channel = -1;
848 		}
849 	}
850 }
851 
852 static void omap2_mcspi_work(struct work_struct *work)
853 {
854 	struct omap2_mcspi	*mcspi;
855 
856 	mcspi = container_of(work, struct omap2_mcspi, work);
857 
858 	if (omap2_mcspi_enable_clocks(mcspi) < 0)
859 		return;
860 
861 	spin_lock_irq(&mcspi->lock);
862 
863 	/* We only enable one channel at a time -- the one whose message is
864 	 * at the head of the queue -- although this controller would gladly
865 	 * arbitrate among multiple channels.  This corresponds to "single
866 	 * channel" master mode.  As a side effect, we need to manage the
867 	 * chipselect with the FORCE bit ... CS != channel enable.
868 	 */
869 	while (!list_empty(&mcspi->msg_queue)) {
870 		struct spi_message		*m;
871 		struct spi_device		*spi;
872 		struct spi_transfer		*t = NULL;
873 		int				cs_active = 0;
874 		struct omap2_mcspi_cs		*cs;
875 		struct omap2_mcspi_device_config *cd;
876 		int				par_override = 0;
877 		int				status = 0;
878 		u32				chconf;
879 
880 		m = container_of(mcspi->msg_queue.next, struct spi_message,
881 				 queue);
882 
883 		list_del_init(&m->queue);
884 		spin_unlock_irq(&mcspi->lock);
885 
886 		spi = m->spi;
887 		cs = spi->controller_state;
888 		cd = spi->controller_data;
889 
890 		omap2_mcspi_set_enable(spi, 1);
891 		list_for_each_entry(t, &m->transfers, transfer_list) {
892 			if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
893 				status = -EINVAL;
894 				break;
895 			}
896 			if (par_override || t->speed_hz || t->bits_per_word) {
897 				par_override = 1;
898 				status = omap2_mcspi_setup_transfer(spi, t);
899 				if (status < 0)
900 					break;
901 				if (!t->speed_hz && !t->bits_per_word)
902 					par_override = 0;
903 			}
904 
905 			if (!cs_active) {
906 				omap2_mcspi_force_cs(spi, 1);
907 				cs_active = 1;
908 			}
909 
910 			chconf = mcspi_cached_chconf0(spi);
911 			chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
912 			chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
913 
914 			if (t->tx_buf == NULL)
915 				chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
916 			else if (t->rx_buf == NULL)
917 				chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
918 
919 			if (cd && cd->turbo_mode && t->tx_buf == NULL) {
920 				/* Turbo mode is for more than one word */
921 				if (t->len > ((cs->word_len + 7) >> 3))
922 					chconf |= OMAP2_MCSPI_CHCONF_TURBO;
923 			}
924 
925 			mcspi_write_chconf0(spi, chconf);
926 
927 			if (t->len) {
928 				unsigned	count;
929 
930 				/* RX_ONLY mode needs dummy data in TX reg */
931 				if (t->tx_buf == NULL)
932 					__raw_writel(0, cs->base
933 							+ OMAP2_MCSPI_TX0);
934 
935 				if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
936 					count = omap2_mcspi_txrx_dma(spi, t);
937 				else
938 					count = omap2_mcspi_txrx_pio(spi, t);
939 				m->actual_length += count;
940 
941 				if (count != t->len) {
942 					status = -EIO;
943 					break;
944 				}
945 			}
946 
947 			if (t->delay_usecs)
948 				udelay(t->delay_usecs);
949 
950 			/* ignore the "leave it on after last xfer" hint */
951 			if (t->cs_change) {
952 				omap2_mcspi_force_cs(spi, 0);
953 				cs_active = 0;
954 			}
955 		}
956 
957 		/* Restore defaults if they were overridden */
958 		if (par_override) {
959 			par_override = 0;
960 			status = omap2_mcspi_setup_transfer(spi, NULL);
961 		}
962 
963 		if (cs_active)
964 			omap2_mcspi_force_cs(spi, 0);
965 
966 		omap2_mcspi_set_enable(spi, 0);
967 
968 		m->status = status;
969 		m->complete(m->context);
970 
971 		spin_lock_irq(&mcspi->lock);
972 	}
973 
974 	spin_unlock_irq(&mcspi->lock);
975 
976 	omap2_mcspi_disable_clocks(mcspi);
977 }
978 
979 static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
980 {
981 	struct omap2_mcspi	*mcspi;
982 	unsigned long		flags;
983 	struct spi_transfer	*t;
984 
985 	m->actual_length = 0;
986 	m->status = 0;
987 
988 	/* reject invalid messages and transfers */
989 	if (list_empty(&m->transfers) || !m->complete)
990 		return -EINVAL;
991 	list_for_each_entry(t, &m->transfers, transfer_list) {
992 		const void	*tx_buf = t->tx_buf;
993 		void		*rx_buf = t->rx_buf;
994 		unsigned	len = t->len;
995 
996 		if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
997 				|| (len && !(rx_buf || tx_buf))
998 				|| (t->bits_per_word &&
999 					(  t->bits_per_word < 4
1000 					|| t->bits_per_word > 32))) {
1001 			dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
1002 					t->speed_hz,
1003 					len,
1004 					tx_buf ? "tx" : "",
1005 					rx_buf ? "rx" : "",
1006 					t->bits_per_word);
1007 			return -EINVAL;
1008 		}
1009 		if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
1010 			dev_dbg(&spi->dev, "speed_hz %d below minimum %d Hz\n",
1011 				t->speed_hz,
1012 				OMAP2_MCSPI_MAX_FREQ >> 15);
1013 			return -EINVAL;
1014 		}
1015 
1016 		if (m->is_dma_mapped || len < DMA_MIN_BYTES)
1017 			continue;
1018 
1019 		if (tx_buf != NULL) {
1020 			t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
1021 					len, DMA_TO_DEVICE);
1022 			if (dma_mapping_error(&spi->dev, t->tx_dma)) {
1023 				dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
1024 						'T', len);
1025 				return -EINVAL;
1026 			}
1027 		}
1028 		if (rx_buf != NULL) {
1029 			t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
1030 					DMA_FROM_DEVICE);
1031 			if (dma_mapping_error(&spi->dev, t->rx_dma)) {
1032 				dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
1033 						'R', len);
1034 				if (tx_buf != NULL)
1035 					dma_unmap_single(&spi->dev, t->tx_dma,
1036 							len, DMA_TO_DEVICE);
1037 				return -EINVAL;
1038 			}
1039 		}
1040 	}
1041 
1042 	mcspi = spi_master_get_devdata(spi->master);
1043 
1044 	spin_lock_irqsave(&mcspi->lock, flags);
1045 	list_add_tail(&m->queue, &mcspi->msg_queue);
1046 	queue_work(omap2_mcspi_wq, &mcspi->work);
1047 	spin_unlock_irqrestore(&mcspi->lock, flags);
1048 
1049 	return 0;
1050 }
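
/*
 * Illustrative client usage (a hedged sketch, not part of this driver;
 * it assumes an already-bound struct spi_device *spi and a DMA-safe
 * buffer buf): the SPI core dispatches spi_async() to the ->transfer
 * hook above, which queues the message for omap2_mcspi_work().
 *
 *	struct spi_transfer t = {
 *		.tx_buf	= buf,
 *		.len	= sizeof(buf),
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	spi_async(spi, &m);	(or spi_sync(spi, &m) for a blocking call)
 */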
1051 
1052 static int __init omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
1053 {
1054 	struct spi_master	*master = mcspi->master;
1055 	u32			tmp;
1056 	int ret = 0;
1057 
1058 	ret = omap2_mcspi_enable_clocks(mcspi);
1059 	if (ret < 0)
1060 		return ret;
1061 
1062 	tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
1063 	mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp);
1064 	omap2_mcspi_ctx[master->bus_num - 1].wakeupenable = tmp;
1065 
1066 	omap2_mcspi_set_master_mode(master);
1067 	omap2_mcspi_disable_clocks(mcspi);
1068 	return 0;
1069 }
1070 
1071 static int omap_mcspi_runtime_resume(struct device *dev)
1072 {
1073 	struct omap2_mcspi	*mcspi;
1074 	struct spi_master	*master;
1075 
1076 	master = dev_get_drvdata(dev);
1077 	mcspi = spi_master_get_devdata(master);
1078 	omap2_mcspi_restore_ctx(mcspi);
1079 
1080 	return 0;
1081 }
1082 
1083 
1084 static int __init omap2_mcspi_probe(struct platform_device *pdev)
1085 {
1086 	struct spi_master	*master;
1087 	struct omap2_mcspi_platform_config *pdata = pdev->dev.platform_data;
1088 	struct omap2_mcspi	*mcspi;
1089 	struct resource		*r;
1090 	int			status = 0, i;
1091 
1092 	master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
1093 	if (master == NULL) {
1094 		dev_dbg(&pdev->dev, "master allocation failed\n");
1095 		return -ENOMEM;
1096 	}
1097 
1098 	/* the spi->mode bits understood by this driver: */
1099 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1100 
1101 	if (pdev->id != -1)
1102 		master->bus_num = pdev->id;
1103 
1104 	master->setup = omap2_mcspi_setup;
1105 	master->transfer = omap2_mcspi_transfer;
1106 	master->cleanup = omap2_mcspi_cleanup;
1107 	master->num_chipselect = pdata->num_cs;
1108 
1109 	dev_set_drvdata(&pdev->dev, master);
1110 
1111 	mcspi = spi_master_get_devdata(master);
1112 	mcspi->master = master;
1113 
1114 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1115 	if (r == NULL) {
1116 		status = -ENODEV;
1117 		goto err1;
1118 	}
1119 	if (!request_mem_region(r->start, resource_size(r),
1120 				dev_name(&pdev->dev))) {
1121 		status = -EBUSY;
1122 		goto err1;
1123 	}
1124 
1125 	r->start += pdata->regs_offset;
1126 	r->end += pdata->regs_offset;
1127 	mcspi->phys = r->start;
1128 	mcspi->base = ioremap(r->start, resource_size(r));
1129 	if (!mcspi->base) {
1130 		dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
1131 		status = -ENOMEM;
1132 		goto err2;
1133 	}
1134 
1135 	mcspi->dev = &pdev->dev;
1136 	INIT_WORK(&mcspi->work, omap2_mcspi_work);
1137 
1138 	spin_lock_init(&mcspi->lock);
1139 	INIT_LIST_HEAD(&mcspi->msg_queue);
1140 	INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs);
1141 
1142 	mcspi->dma_channels = kcalloc(master->num_chipselect,
1143 			sizeof(struct omap2_mcspi_dma),
1144 			GFP_KERNEL);
1145 
1146 	if (mcspi->dma_channels == NULL)
1147 		goto err2;
1148 
1149 	for (i = 0; i < master->num_chipselect; i++) {
1150 		char dma_ch_name[14];
1151 		struct resource *dma_res;
1152 
1153 		sprintf(dma_ch_name, "rx%d", i);
1154 		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
1155 							dma_ch_name);
1156 		if (!dma_res) {
1157 			dev_dbg(&pdev->dev, "cannot get DMA RX channel\n");
1158 			status = -ENODEV;
1159 			break;
1160 		}
1161 
1162 		mcspi->dma_channels[i].dma_rx_channel = -1;
1163 		mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
1164 		sprintf(dma_ch_name, "tx%d", i);
1165 		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
1166 							dma_ch_name);
1167 		if (!dma_res) {
1168 			dev_dbg(&pdev->dev, "cannot get DMA TX channel\n");
1169 			status = -ENODEV;
1170 			break;
1171 		}
1172 
1173 		mcspi->dma_channels[i].dma_tx_channel = -1;
1174 		mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
1175 	}
1176 
1177 	pm_runtime_enable(&pdev->dev);
1178 
1179 	if (status || omap2_mcspi_master_setup(mcspi) < 0)
1180 		goto err3;
1181 
1182 	status = spi_register_master(master);
1183 	if (status < 0)
1184 		goto err4;
1185 
1186 	return status;
1187 
1188 err4:
1189 	spi_master_put(master);
1190 err3:
1191 	kfree(mcspi->dma_channels);
1192 err2:
1193 	release_mem_region(r->start, resource_size(r));
1194 	iounmap(mcspi->base);
1195 err1:
1196 	return status;
1197 }
1198 
1199 static int __exit omap2_mcspi_remove(struct platform_device *pdev)
1200 {
1201 	struct spi_master	*master;
1202 	struct omap2_mcspi	*mcspi;
1203 	struct omap2_mcspi_dma	*dma_channels;
1204 	struct resource		*r;
1205 	void __iomem *base;
1206 
1207 	master = dev_get_drvdata(&pdev->dev);
1208 	mcspi = spi_master_get_devdata(master);
1209 	dma_channels = mcspi->dma_channels;
1210 
1211 	omap2_mcspi_disable_clocks(mcspi);
1212 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1213 	release_mem_region(r->start, resource_size(r));
1214 
1215 	base = mcspi->base;
1216 	spi_unregister_master(master);
1217 	iounmap(base);
1218 	kfree(dma_channels);
1219 
1220 	return 0;
1221 }
1222 
1223 /* work with hotplug and coldplug */
1224 MODULE_ALIAS("platform:omap2_mcspi");
1225 
1226 #ifdef	CONFIG_SUSPEND
1227 /*
1228  * When the SPI controller wakes up from off-mode, CS is in the active
1229  * state. If it was inactive when the driver was suspended, force it back
1230  * to the inactive state at wake up.
1231  */
1232 static int omap2_mcspi_resume(struct device *dev)
1233 {
1234 	struct spi_master	*master = dev_get_drvdata(dev);
1235 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
1236 	struct omap2_mcspi_cs *cs;
1237 
1238 	omap2_mcspi_enable_clocks(mcspi);
1239 	list_for_each_entry(cs, &omap2_mcspi_ctx[master->bus_num - 1].cs,
1240 			    node) {
1241 		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
1242 
1243 			/*
1244 			 * We need to toggle the CS state for the OMAP to take
1245 			 * this change into account.
1246 			 */
1247 			MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 1);
1248 			__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
1249 			MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 0);
1250 			__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
1251 		}
1252 	}
1253 	omap2_mcspi_disable_clocks(mcspi);
1254 	return 0;
1255 }
1256 #else
1257 #define	omap2_mcspi_resume	NULL
1258 #endif
1259 
1260 static const struct dev_pm_ops omap2_mcspi_pm_ops = {
1261 	.resume = omap2_mcspi_resume,
1262 	.runtime_resume	= omap_mcspi_runtime_resume,
1263 };
1264 
1265 static struct platform_driver omap2_mcspi_driver = {
1266 	.driver = {
1267 		.name =		"omap2_mcspi",
1268 		.owner =	THIS_MODULE,
1269 		.pm =		&omap2_mcspi_pm_ops
1270 	},
1271 	.remove =	__exit_p(omap2_mcspi_remove),
1272 };
1273 
1274 
1275 static int __init omap2_mcspi_init(void)
1276 {
1277 	omap2_mcspi_wq = create_singlethread_workqueue(
1278 				omap2_mcspi_driver.driver.name);
1279 	if (omap2_mcspi_wq == NULL)
1280 		return -ENOMEM;
1281 	return platform_driver_probe(&omap2_mcspi_driver, omap2_mcspi_probe);
1282 }
1283 subsys_initcall(omap2_mcspi_init);
1284 
1285 static void __exit omap2_mcspi_exit(void)
1286 {
1287 	platform_driver_unregister(&omap2_mcspi_driver);
1288 
1289 	destroy_workqueue(omap2_mcspi_wq);
1290 }
1291 module_exit(omap2_mcspi_exit);
1292 
1293 MODULE_LICENSE("GPL");
1294