xref: /openbmc/linux/drivers/spi/spi-omap2-mcspi.c (revision 9b9c2cd4)
1 /*
2  * OMAP2 McSPI controller driver
3  *
4  * Copyright (C) 2005, 2006 Nokia Corporation
5  * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
6  *		Juha Yrjölä <juha.yrjola@nokia.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  */
18 
19 #include <linux/kernel.h>
20 #include <linux/interrupt.h>
21 #include <linux/module.h>
22 #include <linux/device.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/dmaengine.h>
26 #include <linux/omap-dma.h>
27 #include <linux/platform_device.h>
28 #include <linux/err.h>
29 #include <linux/clk.h>
30 #include <linux/io.h>
31 #include <linux/slab.h>
32 #include <linux/pm_runtime.h>
33 #include <linux/of.h>
34 #include <linux/of_device.h>
35 #include <linux/gcd.h>
36 
37 #include <linux/spi/spi.h>
38 #include <linux/gpio.h>
39 
40 #include <linux/platform_data/spi-omap2-mcspi.h>
41 
42 #define OMAP2_MCSPI_MAX_FREQ		48000000
43 #define OMAP2_MCSPI_MAX_DIVIDER		4096
44 #define OMAP2_MCSPI_MAX_FIFODEPTH	64
45 #define OMAP2_MCSPI_MAX_FIFOWCNT	0xFFFF
46 #define SPI_AUTOSUSPEND_TIMEOUT		2000
47 
48 #define OMAP2_MCSPI_REVISION		0x00
49 #define OMAP2_MCSPI_SYSSTATUS		0x14
50 #define OMAP2_MCSPI_IRQSTATUS		0x18
51 #define OMAP2_MCSPI_IRQENABLE		0x1c
52 #define OMAP2_MCSPI_WAKEUPENABLE	0x20
53 #define OMAP2_MCSPI_SYST		0x24
54 #define OMAP2_MCSPI_MODULCTRL		0x28
55 #define OMAP2_MCSPI_XFERLEVEL		0x7c
56 
57 /* per-channel banks, 0x14 bytes each, first is: */
58 #define OMAP2_MCSPI_CHCONF0		0x2c
59 #define OMAP2_MCSPI_CHSTAT0		0x30
60 #define OMAP2_MCSPI_CHCTRL0		0x34
61 #define OMAP2_MCSPI_TX0			0x38
62 #define OMAP2_MCSPI_RX0			0x3c
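/*
 * Illustrative note on the per-channel bank layout above (a sketch, derived
 * from the 0x14 byte stride used in omap2_mcspi_setup()): chip select N's
 * registers sit at the channel-0 offset plus N * 0x14, so for example CS1's
 * CHCONF is at 0x2c + 0x14 = 0x40 and CS1's RX register is at
 * 0x3c + 0x14 = 0x50.
 */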
63 
64 /* per-register bitmasks: */
65 #define OMAP2_MCSPI_IRQSTATUS_EOW	BIT(17)
66 
67 #define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
68 #define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
69 #define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)
70 
71 #define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
72 #define OMAP2_MCSPI_CHCONF_POL		BIT(1)
73 #define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
74 #define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
75 #define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
76 #define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
77 #define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
78 #define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
79 #define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
80 #define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
81 #define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
82 #define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
83 #define OMAP2_MCSPI_CHCONF_IS		BIT(18)
84 #define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
85 #define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)
86 #define OMAP2_MCSPI_CHCONF_FFET		BIT(27)
87 #define OMAP2_MCSPI_CHCONF_FFER		BIT(28)
88 #define OMAP2_MCSPI_CHCONF_CLKG		BIT(29)
89 
90 #define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
91 #define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
92 #define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)
93 #define OMAP2_MCSPI_CHSTAT_TXFFE	BIT(3)
94 
95 #define OMAP2_MCSPI_CHCTRL_EN		BIT(0)
96 #define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK	(0xff << 8)
97 
98 #define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)
99 
100 /* We have 2 DMA channels per CS, one for RX and one for TX */
101 struct omap2_mcspi_dma {
102 	struct dma_chan *dma_tx;
103 	struct dma_chan *dma_rx;
104 
105 	int dma_tx_sync_dev;
106 	int dma_rx_sync_dev;
107 
108 	struct completion dma_tx_completion;
109 	struct completion dma_rx_completion;
110 
111 	char dma_rx_ch_name[14];
112 	char dma_tx_ch_name[14];
113 };
114 
115 /* use PIO for small transfers, avoiding DMA setup/teardown overhead and
116  * cache operations; better heuristics consider wordsize and bitrate.
117  */
118 #define DMA_MIN_BYTES			160
119 
120 
121 /*
122  * Used for context save and restore; the structure members must be updated
123  * whenever the corresponding registers are modified.
124  */
125 struct omap2_mcspi_regs {
126 	u32 modulctrl;
127 	u32 wakeupenable;
128 	struct list_head cs;
129 };
130 
131 struct omap2_mcspi {
132 	struct spi_master	*master;
133 	/* Virtual base address of the controller */
134 	void __iomem		*base;
135 	unsigned long		phys;
136 	/* SPI1 has 4 channels, while SPI2 has 2 */
137 	struct omap2_mcspi_dma	*dma_channels;
138 	struct device		*dev;
139 	struct omap2_mcspi_regs ctx;
140 	int			fifo_depth;
141 	unsigned int		pin_dir:1;
142 };
143 
144 struct omap2_mcspi_cs {
145 	void __iomem		*base;
146 	unsigned long		phys;
147 	int			word_len;
148 	u16			mode;
149 	struct list_head	node;
150 	/* Context save and restore shadow registers */
151 	u32			chconf0, chctrl0;
152 };
153 
154 static inline void mcspi_write_reg(struct spi_master *master,
155 		int idx, u32 val)
156 {
157 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
158 
159 	writel_relaxed(val, mcspi->base + idx);
160 }
161 
162 static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
163 {
164 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
165 
166 	return readl_relaxed(mcspi->base + idx);
167 }
168 
169 static inline void mcspi_write_cs_reg(const struct spi_device *spi,
170 		int idx, u32 val)
171 {
172 	struct omap2_mcspi_cs	*cs = spi->controller_state;
173 
174 	writel_relaxed(val, cs->base +  idx);
175 }
176 
177 static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
178 {
179 	struct omap2_mcspi_cs	*cs = spi->controller_state;
180 
181 	return readl_relaxed(cs->base + idx);
182 }
183 
184 static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
185 {
186 	struct omap2_mcspi_cs *cs = spi->controller_state;
187 
188 	return cs->chconf0;
189 }
190 
191 static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
192 {
193 	struct omap2_mcspi_cs *cs = spi->controller_state;
194 
195 	cs->chconf0 = val;
196 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
197 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
198 }
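/*
 * The read-back of CHCONF0 above is not dead code: like the "Flush posted
 * writes" read in omap2_mcspi_set_enable(), it forces the relaxed write out
 * to the module before the caller continues.
 */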
199 
200 static inline int mcspi_bytes_per_word(int word_len)
201 {
202 	if (word_len <= 8)
203 		return 1;
204 	else if (word_len <= 16)
205 		return 2;
206 	else /* word_len <= 32 */
207 		return 4;
208 }
209 
210 static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
211 		int is_read, int enable)
212 {
213 	u32 l, rw;
214 
215 	l = mcspi_cached_chconf0(spi);
216 
217 	if (is_read) /* 1 is read, 0 write */
218 		rw = OMAP2_MCSPI_CHCONF_DMAR;
219 	else
220 		rw = OMAP2_MCSPI_CHCONF_DMAW;
221 
222 	if (enable)
223 		l |= rw;
224 	else
225 		l &= ~rw;
226 
227 	mcspi_write_chconf0(spi, l);
228 }
229 
230 static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
231 {
232 	struct omap2_mcspi_cs *cs = spi->controller_state;
233 	u32 l;
234 
235 	l = cs->chctrl0;
236 	if (enable)
237 		l |= OMAP2_MCSPI_CHCTRL_EN;
238 	else
239 		l &= ~OMAP2_MCSPI_CHCTRL_EN;
240 	cs->chctrl0 = l;
241 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
242 	/* Flush posted writes */
243 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
244 }
245 
246 static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
247 {
248 	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
249 	u32 l;
250 
251 	/* The controller handles the inverted chip selects
252 	 * using the OMAP2_MCSPI_CHCONF_EPOL bit so revert
253 	 * the inversion from the core spi_set_cs function.
254 	 */
255 	if (spi->mode & SPI_CS_HIGH)
256 		enable = !enable;
257 
258 	if (spi->controller_state) {
259 		int err = pm_runtime_get_sync(mcspi->dev);
260 		if (err < 0) {
261 			dev_err(mcspi->dev, "failed to get sync: %d\n", err);
262 			return;
263 		}
264 
265 		l = mcspi_cached_chconf0(spi);
266 
267 		if (enable)
268 			l &= ~OMAP2_MCSPI_CHCONF_FORCE;
269 		else
270 			l |= OMAP2_MCSPI_CHCONF_FORCE;
271 
272 		mcspi_write_chconf0(spi, l);
273 
274 		pm_runtime_mark_last_busy(mcspi->dev);
275 		pm_runtime_put_autosuspend(mcspi->dev);
276 	}
277 }
278 
279 static void omap2_mcspi_set_master_mode(struct spi_master *master)
280 {
281 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
282 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
283 	u32 l;
284 
285 	/*
286 	 * Setup when switching from (reset default) slave mode
287 	 * to single-channel master mode
288 	 */
289 	l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
290 	l &= ~(OMAP2_MCSPI_MODULCTRL_STEST | OMAP2_MCSPI_MODULCTRL_MS);
291 	l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
292 	mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
293 
294 	ctx->modulctrl = l;
295 }
296 
297 static void omap2_mcspi_set_fifo(const struct spi_device *spi,
298 				struct spi_transfer *t, int enable)
299 {
300 	struct spi_master *master = spi->master;
301 	struct omap2_mcspi_cs *cs = spi->controller_state;
302 	struct omap2_mcspi *mcspi;
303 	unsigned int wcnt;
304 	int max_fifo_depth, fifo_depth, bytes_per_word;
305 	u32 chconf, xferlevel;
306 
307 	mcspi = spi_master_get_devdata(master);
308 
309 	chconf = mcspi_cached_chconf0(spi);
310 	if (enable) {
311 		bytes_per_word = mcspi_bytes_per_word(cs->word_len);
312 		if (t->len % bytes_per_word != 0)
313 			goto disable_fifo;
314 
315 		if (t->rx_buf != NULL && t->tx_buf != NULL)
316 			max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
317 		else
318 			max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
319 
320 		fifo_depth = gcd(t->len, max_fifo_depth);
321 		if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0)
322 			goto disable_fifo;
323 
324 		wcnt = t->len / bytes_per_word;
325 		if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
326 			goto disable_fifo;
327 
328 		xferlevel = wcnt << 16;
329 		if (t->rx_buf != NULL) {
330 			chconf |= OMAP2_MCSPI_CHCONF_FFER;
331 			xferlevel |= (fifo_depth - 1) << 8;
332 		}
333 		if (t->tx_buf != NULL) {
334 			chconf |= OMAP2_MCSPI_CHCONF_FFET;
335 			xferlevel |= fifo_depth - 1;
336 		}
337 
338 		mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
339 		mcspi_write_chconf0(spi, chconf);
340 		mcspi->fifo_depth = fifo_depth;
341 
342 		return;
343 	}
344 
345 disable_fifo:
346 	if (t->rx_buf != NULL)
347 		chconf &= ~OMAP2_MCSPI_CHCONF_FFER;
348 
349 	if (t->tx_buf != NULL)
350 		chconf &= ~OMAP2_MCSPI_CHCONF_FFET;
351 
352 	mcspi_write_chconf0(spi, chconf);
353 	mcspi->fifo_depth = 0;
354 }
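/*
 * Worked example for the FIFO sizing above (illustrative only): a full-duplex
 * 96-byte transfer with 8-bit words halves the 64-byte FIFO to 32 bytes per
 * direction, gcd(96, 32) = 32 becomes the buffer almost-full/empty level, and
 * XFERLEVEL is programmed with a word count of 96 and RX/TX levels of 31.
 */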
355 
356 static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
357 {
358 	struct spi_master	*spi_cntrl = mcspi->master;
359 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
360 	struct omap2_mcspi_cs	*cs;
361 
362 	/* McSPI: context restore */
363 	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
364 	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
365 
366 	list_for_each_entry(cs, &ctx->cs, node)
367 		writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
368 }
369 
370 static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
371 {
372 	unsigned long timeout;
373 
374 	timeout = jiffies + msecs_to_jiffies(1000);
375 	while (!(readl_relaxed(reg) & bit)) {
376 		if (time_after(jiffies, timeout)) {
377 			if (!(readl_relaxed(reg) & bit))
378 				return -ETIMEDOUT;
379 			else
380 				return 0;
381 		}
382 		cpu_relax();
383 	}
384 	return 0;
385 }
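/*
 * The extra readl_relaxed() inside the timeout branch above is not redundant:
 * if this thread was preempted for longer than the timeout, the bit may have
 * become set in the meantime, so the status is sampled one last time before
 * declaring -ETIMEDOUT.
 */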
386 
387 static void omap2_mcspi_rx_callback(void *data)
388 {
389 	struct spi_device *spi = data;
390 	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
391 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
392 
393 	/* We must disable the DMA RX request */
394 	omap2_mcspi_set_dma_req(spi, 1, 0);
395 
396 	complete(&mcspi_dma->dma_rx_completion);
397 }
398 
399 static void omap2_mcspi_tx_callback(void *data)
400 {
401 	struct spi_device *spi = data;
402 	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
403 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
404 
405 	/* We must disable the DMA TX request */
406 	omap2_mcspi_set_dma_req(spi, 0, 0);
407 
408 	complete(&mcspi_dma->dma_tx_completion);
409 }
410 
411 static void omap2_mcspi_tx_dma(struct spi_device *spi,
412 				struct spi_transfer *xfer,
413 				struct dma_slave_config cfg)
414 {
415 	struct omap2_mcspi	*mcspi;
416 	struct omap2_mcspi_dma  *mcspi_dma;
417 	unsigned int		count;
418 
419 	mcspi = spi_master_get_devdata(spi->master);
420 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
421 	count = xfer->len;
422 
423 	if (mcspi_dma->dma_tx) {
424 		struct dma_async_tx_descriptor *tx;
425 		struct scatterlist sg;
426 
427 		dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
428 
429 		sg_init_table(&sg, 1);
430 		sg_dma_address(&sg) = xfer->tx_dma;
431 		sg_dma_len(&sg) = xfer->len;
432 
433 		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
434 		DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
435 		if (tx) {
436 			tx->callback = omap2_mcspi_tx_callback;
437 			tx->callback_param = spi;
438 			dmaengine_submit(tx);
439 		} else {
440 			/* FIXME: fall back to PIO? */
441 		}
442 	}
443 	dma_async_issue_pending(mcspi_dma->dma_tx);
444 	omap2_mcspi_set_dma_req(spi, 0, 1);
445 
446 }
447 
448 static unsigned
449 omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
450 				struct dma_slave_config cfg,
451 				unsigned es)
452 {
453 	struct omap2_mcspi	*mcspi;
454 	struct omap2_mcspi_dma  *mcspi_dma;
455 	unsigned int		count, dma_count;
456 	u32			l;
457 	int			elements = 0;
458 	int			word_len, element_count;
459 	struct omap2_mcspi_cs	*cs = spi->controller_state;
460 	mcspi = spi_master_get_devdata(spi->master);
461 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
462 	count = xfer->len;
463 	dma_count = xfer->len;
464 
465 	if (mcspi->fifo_depth == 0)
466 		dma_count -= es;
467 
468 	word_len = cs->word_len;
469 	l = mcspi_cached_chconf0(spi);
470 
471 	if (word_len <= 8)
472 		element_count = count;
473 	else if (word_len <= 16)
474 		element_count = count >> 1;
475 	else /* word_len <= 32 */
476 		element_count = count >> 2;
477 
478 	if (mcspi_dma->dma_rx) {
479 		struct dma_async_tx_descriptor *tx;
480 		struct scatterlist sg;
481 
482 		dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
483 
484 		if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
485 			dma_count -= es;
486 
487 		sg_init_table(&sg, 1);
488 		sg_dma_address(&sg) = xfer->rx_dma;
489 		sg_dma_len(&sg) = dma_count;
490 
491 		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
492 				DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
493 				DMA_CTRL_ACK);
494 		if (tx) {
495 			tx->callback = omap2_mcspi_rx_callback;
496 			tx->callback_param = spi;
497 			dmaengine_submit(tx);
498 		} else {
499 			/* FIXME: fall back to PIO? */
500 		}
501 	}
502 
503 	dma_async_issue_pending(mcspi_dma->dma_rx);
504 	omap2_mcspi_set_dma_req(spi, 1, 1);
505 
506 	wait_for_completion(&mcspi_dma->dma_rx_completion);
507 	dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
508 			 DMA_FROM_DEVICE);
509 
510 	if (mcspi->fifo_depth > 0)
511 		return count;
512 
513 	omap2_mcspi_set_enable(spi, 0);
514 
515 	elements = element_count - 1;
516 
517 	if (l & OMAP2_MCSPI_CHCONF_TURBO) {
518 		elements--;
519 
520 		if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
521 				   & OMAP2_MCSPI_CHSTAT_RXS)) {
522 			u32 w;
523 
524 			w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
525 			if (word_len <= 8)
526 				((u8 *)xfer->rx_buf)[elements++] = w;
527 			else if (word_len <= 16)
528 				((u16 *)xfer->rx_buf)[elements++] = w;
529 			else /* word_len <= 32 */
530 				((u32 *)xfer->rx_buf)[elements++] = w;
531 		} else {
532 			int bytes_per_word = mcspi_bytes_per_word(word_len);
533 			dev_err(&spi->dev, "DMA RX penultimate word empty\n");
534 			count -= (bytes_per_word << 1);
535 			omap2_mcspi_set_enable(spi, 1);
536 			return count;
537 		}
538 	}
539 	if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
540 				& OMAP2_MCSPI_CHSTAT_RXS)) {
541 		u32 w;
542 
543 		w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
544 		if (word_len <= 8)
545 			((u8 *)xfer->rx_buf)[elements] = w;
546 		else if (word_len <= 16)
547 			((u16 *)xfer->rx_buf)[elements] = w;
548 		else /* word_len <= 32 */
549 			((u32 *)xfer->rx_buf)[elements] = w;
550 	} else {
551 		dev_err(&spi->dev, "DMA RX last word empty\n");
552 		count -= mcspi_bytes_per_word(word_len);
553 	}
554 	omap2_mcspi_set_enable(spi, 1);
555 	return count;
556 }
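/*
 * When the FIFO is not in use, the RX DMA above is deliberately shortened by
 * one word (two in TURBO mode) and the remaining word(s) are then drained
 * from RX0 by hand with the channel disabled. This appears to be done so that
 * the final DMA read cannot trigger the controller into clocking in an extra
 * word during an RX_ONLY transfer; the manual reads only happen once the
 * channel enable bit has been cleared.
 */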
557 
558 static unsigned
559 omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
560 {
561 	struct omap2_mcspi	*mcspi;
562 	struct omap2_mcspi_cs	*cs = spi->controller_state;
563 	struct omap2_mcspi_dma  *mcspi_dma;
564 	unsigned int		count;
565 	u32			l;
566 	u8			*rx;
567 	const u8		*tx;
568 	struct dma_slave_config	cfg;
569 	enum dma_slave_buswidth width;
570 	unsigned es;
571 	u32			burst;
572 	void __iomem		*chstat_reg;
573 	void __iomem            *irqstat_reg;
574 	int			wait_res;
575 
576 	mcspi = spi_master_get_devdata(spi->master);
577 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
578 	l = mcspi_cached_chconf0(spi);
579 
580 
581 	if (cs->word_len <= 8) {
582 		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
583 		es = 1;
584 	} else if (cs->word_len <= 16) {
585 		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
586 		es = 2;
587 	} else {
588 		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
589 		es = 4;
590 	}
591 
592 	count = xfer->len;
593 	burst = 1;
594 
595 	if (mcspi->fifo_depth > 0) {
596 		if (count > mcspi->fifo_depth)
597 			burst = mcspi->fifo_depth / es;
598 		else
599 			burst = count / es;
600 	}
601 
602 	memset(&cfg, 0, sizeof(cfg));
603 	cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
604 	cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
605 	cfg.src_addr_width = width;
606 	cfg.dst_addr_width = width;
607 	cfg.src_maxburst = burst;
608 	cfg.dst_maxburst = burst;
609 
610 	rx = xfer->rx_buf;
611 	tx = xfer->tx_buf;
612 
613 	if (tx != NULL)
614 		omap2_mcspi_tx_dma(spi, xfer, cfg);
615 
616 	if (rx != NULL)
617 		count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);
618 
619 	if (tx != NULL) {
620 		wait_for_completion(&mcspi_dma->dma_tx_completion);
621 		dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
622 				 DMA_TO_DEVICE);
623 
624 		if (mcspi->fifo_depth > 0) {
625 			irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
626 
627 			if (mcspi_wait_for_reg_bit(irqstat_reg,
628 						OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
629 				dev_err(&spi->dev, "EOW timed out\n");
630 
631 			mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS,
632 					OMAP2_MCSPI_IRQSTATUS_EOW);
633 		}
634 
635 		/* for TX_ONLY mode, be sure all words have shifted out */
636 		if (rx == NULL) {
637 			chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
638 			if (mcspi->fifo_depth > 0) {
639 				wait_res = mcspi_wait_for_reg_bit(chstat_reg,
640 						OMAP2_MCSPI_CHSTAT_TXFFE);
641 				if (wait_res < 0)
642 					dev_err(&spi->dev, "TXFFE timed out\n");
643 			} else {
644 				wait_res = mcspi_wait_for_reg_bit(chstat_reg,
645 						OMAP2_MCSPI_CHSTAT_TXS);
646 				if (wait_res < 0)
647 					dev_err(&spi->dev, "TXS timed out\n");
648 			}
649 			if (wait_res >= 0 &&
650 				(mcspi_wait_for_reg_bit(chstat_reg,
651 					OMAP2_MCSPI_CHSTAT_EOT) < 0))
652 				dev_err(&spi->dev, "EOT timed out\n");
653 		}
654 	}
655 	return count;
656 }
657 
658 static unsigned
659 omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
660 {
661 	struct omap2_mcspi	*mcspi;
662 	struct omap2_mcspi_cs	*cs = spi->controller_state;
663 	unsigned int		count, c;
664 	u32			l;
665 	void __iomem		*base = cs->base;
666 	void __iomem		*tx_reg;
667 	void __iomem		*rx_reg;
668 	void __iomem		*chstat_reg;
669 	int			word_len;
670 
671 	mcspi = spi_master_get_devdata(spi->master);
672 	count = xfer->len;
673 	c = count;
674 	word_len = cs->word_len;
675 
676 	l = mcspi_cached_chconf0(spi);
677 
678 	/* We store the pre-calculated register addresses on stack to speed
679 	 * up the transfer loop. */
680 	tx_reg		= base + OMAP2_MCSPI_TX0;
681 	rx_reg		= base + OMAP2_MCSPI_RX0;
682 	chstat_reg	= base + OMAP2_MCSPI_CHSTAT0;
683 
684 	if (c < (word_len>>3))
685 		return 0;
686 
687 	if (word_len <= 8) {
688 		u8		*rx;
689 		const u8	*tx;
690 
691 		rx = xfer->rx_buf;
692 		tx = xfer->tx_buf;
693 
694 		do {
695 			c -= 1;
696 			if (tx != NULL) {
697 				if (mcspi_wait_for_reg_bit(chstat_reg,
698 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
699 					dev_err(&spi->dev, "TXS timed out\n");
700 					goto out;
701 				}
702 				dev_vdbg(&spi->dev, "write-%d %02x\n",
703 						word_len, *tx);
704 				writel_relaxed(*tx++, tx_reg);
705 			}
706 			if (rx != NULL) {
707 				if (mcspi_wait_for_reg_bit(chstat_reg,
708 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
709 					dev_err(&spi->dev, "RXS timed out\n");
710 					goto out;
711 				}
712 
713 				if (c == 1 && tx == NULL &&
714 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
715 					omap2_mcspi_set_enable(spi, 0);
716 					*rx++ = readl_relaxed(rx_reg);
717 					dev_vdbg(&spi->dev, "read-%d %02x\n",
718 						    word_len, *(rx - 1));
719 					if (mcspi_wait_for_reg_bit(chstat_reg,
720 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
721 						dev_err(&spi->dev,
722 							"RXS timed out\n");
723 						goto out;
724 					}
725 					c = 0;
726 				} else if (c == 0 && tx == NULL) {
727 					omap2_mcspi_set_enable(spi, 0);
728 				}
729 
730 				*rx++ = readl_relaxed(rx_reg);
731 				dev_vdbg(&spi->dev, "read-%d %02x\n",
732 						word_len, *(rx - 1));
733 			}
734 		} while (c);
735 	} else if (word_len <= 16) {
736 		u16		*rx;
737 		const u16	*tx;
738 
739 		rx = xfer->rx_buf;
740 		tx = xfer->tx_buf;
741 		do {
742 			c -= 2;
743 			if (tx != NULL) {
744 				if (mcspi_wait_for_reg_bit(chstat_reg,
745 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
746 					dev_err(&spi->dev, "TXS timed out\n");
747 					goto out;
748 				}
749 				dev_vdbg(&spi->dev, "write-%d %04x\n",
750 						word_len, *tx);
751 				writel_relaxed(*tx++, tx_reg);
752 			}
753 			if (rx != NULL) {
754 				if (mcspi_wait_for_reg_bit(chstat_reg,
755 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
756 					dev_err(&spi->dev, "RXS timed out\n");
757 					goto out;
758 				}
759 
760 				if (c == 2 && tx == NULL &&
761 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
762 					omap2_mcspi_set_enable(spi, 0);
763 					*rx++ = readl_relaxed(rx_reg);
764 					dev_vdbg(&spi->dev, "read-%d %04x\n",
765 						    word_len, *(rx - 1));
766 					if (mcspi_wait_for_reg_bit(chstat_reg,
767 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
768 						dev_err(&spi->dev,
769 							"RXS timed out\n");
770 						goto out;
771 					}
772 					c = 0;
773 				} else if (c == 0 && tx == NULL) {
774 					omap2_mcspi_set_enable(spi, 0);
775 				}
776 
777 				*rx++ = readl_relaxed(rx_reg);
778 				dev_vdbg(&spi->dev, "read-%d %04x\n",
779 						word_len, *(rx - 1));
780 			}
781 		} while (c >= 2);
782 	} else if (word_len <= 32) {
783 		u32		*rx;
784 		const u32	*tx;
785 
786 		rx = xfer->rx_buf;
787 		tx = xfer->tx_buf;
788 		do {
789 			c -= 4;
790 			if (tx != NULL) {
791 				if (mcspi_wait_for_reg_bit(chstat_reg,
792 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
793 					dev_err(&spi->dev, "TXS timed out\n");
794 					goto out;
795 				}
796 				dev_vdbg(&spi->dev, "write-%d %08x\n",
797 						word_len, *tx);
798 				writel_relaxed(*tx++, tx_reg);
799 			}
800 			if (rx != NULL) {
801 				if (mcspi_wait_for_reg_bit(chstat_reg,
802 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
803 					dev_err(&spi->dev, "RXS timed out\n");
804 					goto out;
805 				}
806 
807 				if (c == 4 && tx == NULL &&
808 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
809 					omap2_mcspi_set_enable(spi, 0);
810 					*rx++ = readl_relaxed(rx_reg);
811 					dev_vdbg(&spi->dev, "read-%d %08x\n",
812 						    word_len, *(rx - 1));
813 					if (mcspi_wait_for_reg_bit(chstat_reg,
814 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
815 						dev_err(&spi->dev,
816 							"RXS timed out\n");
817 						goto out;
818 					}
819 					c = 0;
820 				} else if (c == 0 && tx == NULL) {
821 					omap2_mcspi_set_enable(spi, 0);
822 				}
823 
824 				*rx++ = readl_relaxed(rx_reg);
825 				dev_vdbg(&spi->dev, "read-%d %08x\n",
826 						word_len, *(rx - 1));
827 			}
828 		} while (c >= 4);
829 	}
830 
831 	/* for TX_ONLY mode, be sure all words have shifted out */
832 	if (xfer->rx_buf == NULL) {
833 		if (mcspi_wait_for_reg_bit(chstat_reg,
834 				OMAP2_MCSPI_CHSTAT_TXS) < 0) {
835 			dev_err(&spi->dev, "TXS timed out\n");
836 		} else if (mcspi_wait_for_reg_bit(chstat_reg,
837 				OMAP2_MCSPI_CHSTAT_EOT) < 0)
838 			dev_err(&spi->dev, "EOT timed out\n");
839 
840 		/* disable the channel to purge RX data clocked in during a
841 		 * TX_ONLY transfer; otherwise that stale RX data would corrupt
842 		 * an immediately following RX_ONLY transfer.
843 		 */
844 		omap2_mcspi_set_enable(spi, 0);
845 	}
846 out:
847 	omap2_mcspi_set_enable(spi, 1);
848 	return count - c;
849 }
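/*
 * In the RX-only TURBO branches above, the channel is disabled before the
 * final two words are read back-to-back, mirroring the end-of-transfer
 * handling in the DMA path; presumably this keeps the controller from
 * shifting in data past the requested transfer length while the last words
 * are drained.
 */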
850 
851 static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
852 {
853 	u32 div;
854 
855 	for (div = 0; div < 15; div++)
856 		if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
857 			return div;
858 
859 	return 15;
860 }
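/*
 * Example (illustrative): for a requested 10 MHz clock this returns 3, since
 * 48 MHz >> 3 = 6 MHz is the fastest power-of-two divided clock that does not
 * exceed the request; requests slower than 48 MHz >> 14 (about 2.9 kHz) fall
 * through to the maximum divider exponent of 15.
 */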
861 
862 /* called only when no transfer is active to this device */
863 static int omap2_mcspi_setup_transfer(struct spi_device *spi,
864 		struct spi_transfer *t)
865 {
866 	struct omap2_mcspi_cs *cs = spi->controller_state;
867 	struct omap2_mcspi *mcspi;
868 	struct spi_master *spi_cntrl;
869 	u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0;
870 	u8 word_len = spi->bits_per_word;
871 	u32 speed_hz = spi->max_speed_hz;
872 
873 	mcspi = spi_master_get_devdata(spi->master);
874 	spi_cntrl = mcspi->master;
875 
876 	if (t != NULL && t->bits_per_word)
877 		word_len = t->bits_per_word;
878 
879 	cs->word_len = word_len;
880 
881 	if (t && t->speed_hz)
882 		speed_hz = t->speed_hz;
883 
884 	speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
885 	if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) {
886 		clkd = omap2_mcspi_calc_divisor(speed_hz);
887 		speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd;
888 		clkg = 0;
889 	} else {
890 		div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
891 		speed_hz = OMAP2_MCSPI_MAX_FREQ / div;
892 		clkd = (div - 1) & 0xf;
893 		extclk = (div - 1) >> 4;
894 		clkg = OMAP2_MCSPI_CHCONF_CLKG;
895 	}
896 
897 	l = mcspi_cached_chconf0(spi);
898 
899 	/* standard 4-wire master mode:  SCK, MOSI/out, MISO/in, nCS
900 	 * REVISIT: this controller could support SPI_3WIRE mode.
901 	 */
902 	if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
903 		l &= ~OMAP2_MCSPI_CHCONF_IS;
904 		l &= ~OMAP2_MCSPI_CHCONF_DPE1;
905 		l |= OMAP2_MCSPI_CHCONF_DPE0;
906 	} else {
907 		l |= OMAP2_MCSPI_CHCONF_IS;
908 		l |= OMAP2_MCSPI_CHCONF_DPE1;
909 		l &= ~OMAP2_MCSPI_CHCONF_DPE0;
910 	}
911 
912 	/* wordlength */
913 	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
914 	l |= (word_len - 1) << 7;
915 
916 	/* set chipselect polarity; manage with FORCE */
917 	if (!(spi->mode & SPI_CS_HIGH))
918 		l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
919 	else
920 		l &= ~OMAP2_MCSPI_CHCONF_EPOL;
921 
922 	/* set clock divisor */
923 	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
924 	l |= clkd << 2;
925 
926 	/* set clock granularity */
927 	l &= ~OMAP2_MCSPI_CHCONF_CLKG;
928 	l |= clkg;
929 	if (clkg) {
930 		cs->chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
931 		cs->chctrl0 |= extclk << 8;
932 		mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
933 	}
934 
935 	/* set SPI mode 0..3 */
936 	if (spi->mode & SPI_CPOL)
937 		l |= OMAP2_MCSPI_CHCONF_POL;
938 	else
939 		l &= ~OMAP2_MCSPI_CHCONF_POL;
940 	if (spi->mode & SPI_CPHA)
941 		l |= OMAP2_MCSPI_CHCONF_PHA;
942 	else
943 		l &= ~OMAP2_MCSPI_CHCONF_PHA;
944 
945 	mcspi_write_chconf0(spi, l);
946 
947 	cs->mode = spi->mode;
948 
949 	dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
950 			speed_hz,
951 			(spi->mode & SPI_CPHA) ? "trailing" : "leading",
952 			(spi->mode & SPI_CPOL) ? "inverted" : "normal");
953 
954 	return 0;
955 }
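/*
 * Worked example for the clock setup above (illustrative): a 5 MHz request is
 * not below 48 MHz / 4096, so the one-clock-granularity path is taken:
 * div = ceil(48 MHz / 5 MHz) = 10, the actual rate becomes 4.8 MHz, CLKD
 * holds (10 - 1) & 0xf = 9, EXTCLK holds (10 - 1) >> 4 = 0, and the CLKG bit
 * is set. A 1 kHz request, by contrast, is below 48 MHz / 4096 and uses the
 * legacy power-of-two divider from omap2_mcspi_calc_divisor().
 */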
956 
957 /*
958  * Note that we currently allow DMA only if we get a channel
959  * for both rx and tx. Otherwise we'll do PIO for both rx and tx.
960  */
961 static int omap2_mcspi_request_dma(struct spi_device *spi)
962 {
963 	struct spi_master	*master = spi->master;
964 	struct omap2_mcspi	*mcspi;
965 	struct omap2_mcspi_dma	*mcspi_dma;
966 	dma_cap_mask_t mask;
967 	unsigned sig;
968 
969 	mcspi = spi_master_get_devdata(master);
970 	mcspi_dma = mcspi->dma_channels + spi->chip_select;
971 
972 	init_completion(&mcspi_dma->dma_rx_completion);
973 	init_completion(&mcspi_dma->dma_tx_completion);
974 
975 	dma_cap_zero(mask);
976 	dma_cap_set(DMA_SLAVE, mask);
977 	sig = mcspi_dma->dma_rx_sync_dev;
978 
979 	mcspi_dma->dma_rx =
980 		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
981 						 &sig, &master->dev,
982 						 mcspi_dma->dma_rx_ch_name);
983 	if (!mcspi_dma->dma_rx)
984 		goto no_dma;
985 
986 	sig = mcspi_dma->dma_tx_sync_dev;
987 	mcspi_dma->dma_tx =
988 		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
989 						 &sig, &master->dev,
990 						 mcspi_dma->dma_tx_ch_name);
991 
992 	if (!mcspi_dma->dma_tx) {
993 		dma_release_channel(mcspi_dma->dma_rx);
994 		mcspi_dma->dma_rx = NULL;
995 		goto no_dma;
996 	}
997 
998 	return 0;
999 
1000 no_dma:
1001 	dev_warn(&spi->dev, "not using DMA for McSPI\n");
1002 	return -EAGAIN;
1003 }
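/*
 * Note: the -EAGAIN returned above is treated as non-fatal by
 * omap2_mcspi_setup(); the device remains usable, and transfers simply take
 * the PIO path in omap2_mcspi_txrx_pio() instead of DMA.
 */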
1004 
1005 static int omap2_mcspi_setup(struct spi_device *spi)
1006 {
1007 	int			ret;
1008 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(spi->master);
1009 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1010 	struct omap2_mcspi_dma	*mcspi_dma;
1011 	struct omap2_mcspi_cs	*cs = spi->controller_state;
1012 
1013 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
1014 
1015 	if (!cs) {
1016 		cs = kzalloc(sizeof *cs, GFP_KERNEL);
1017 		if (!cs)
1018 			return -ENOMEM;
1019 		cs->base = mcspi->base + spi->chip_select * 0x14;
1020 		cs->phys = mcspi->phys + spi->chip_select * 0x14;
1021 		cs->mode = 0;
1022 		cs->chconf0 = 0;
1023 		cs->chctrl0 = 0;
1024 		spi->controller_state = cs;
1025 		/* Link this to context save list */
1026 		list_add_tail(&cs->node, &ctx->cs);
1027 	}
1028 
1029 	if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
1030 		ret = omap2_mcspi_request_dma(spi);
1031 		if (ret < 0 && ret != -EAGAIN)
1032 			return ret;
1033 	}
1034 
1035 	if (gpio_is_valid(spi->cs_gpio)) {
1036 		ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
1037 		if (ret) {
1038 			dev_err(&spi->dev, "failed to request gpio\n");
1039 			return ret;
1040 		}
1041 		gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
1042 	}
1043 
1044 	ret = pm_runtime_get_sync(mcspi->dev);
1045 	if (ret < 0)
1046 		return ret;
1047 
1048 	ret = omap2_mcspi_setup_transfer(spi, NULL);
1049 	pm_runtime_mark_last_busy(mcspi->dev);
1050 	pm_runtime_put_autosuspend(mcspi->dev);
1051 
1052 	return ret;
1053 }
1054 
1055 static void omap2_mcspi_cleanup(struct spi_device *spi)
1056 {
1057 	struct omap2_mcspi	*mcspi;
1058 	struct omap2_mcspi_dma	*mcspi_dma;
1059 	struct omap2_mcspi_cs	*cs;
1060 
1061 	mcspi = spi_master_get_devdata(spi->master);
1062 
1063 	if (spi->controller_state) {
1064 		/* Unlink controller state from context save list */
1065 		cs = spi->controller_state;
1066 		list_del(&cs->node);
1067 
1068 		kfree(cs);
1069 	}
1070 
1071 	if (spi->chip_select < spi->master->num_chipselect) {
1072 		mcspi_dma = &mcspi->dma_channels[spi->chip_select];
1073 
1074 		if (mcspi_dma->dma_rx) {
1075 			dma_release_channel(mcspi_dma->dma_rx);
1076 			mcspi_dma->dma_rx = NULL;
1077 		}
1078 		if (mcspi_dma->dma_tx) {
1079 			dma_release_channel(mcspi_dma->dma_tx);
1080 			mcspi_dma->dma_tx = NULL;
1081 		}
1082 	}
1083 
1084 	if (gpio_is_valid(spi->cs_gpio))
1085 		gpio_free(spi->cs_gpio);
1086 }
1087 
1088 static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
1089 		struct spi_device *spi, struct spi_transfer *t)
1090 {
1091 
1092 	/* We only enable one channel at a time -- the one whose message is
1093 	 * at the head of the queue -- even though this controller would gladly
1094 	 * arbitrate among multiple channels.  This corresponds to "single
1095 	 * channel" master mode.  As a side effect, we need to manage the
1096 	 * chipselect with the FORCE bit ... CS != channel enable.
1097 	 */
1098 
1099 	struct spi_master		*master;
1100 	struct omap2_mcspi_dma		*mcspi_dma;
1101 	struct omap2_mcspi_cs		*cs;
1102 	struct omap2_mcspi_device_config *cd;
1103 	int				par_override = 0;
1104 	int				status = 0;
1105 	u32				chconf;
1106 
1107 	master = spi->master;
1108 	mcspi_dma = mcspi->dma_channels + spi->chip_select;
1109 	cs = spi->controller_state;
1110 	cd = spi->controller_data;
1111 
1112 	/*
1113 	 * The slave driver could have changed spi->mode in which case
1114 	 * it will be different from cs->mode (the current hardware setup).
1115 	 * If so, set par_override (even though it's not a parity issue) so
1116 	 * omap2_mcspi_setup_transfer will be called to configure the hardware
1117 	 * with the correct mode on the first iteration of the loop below.
1118 	 */
1119 	if (spi->mode != cs->mode)
1120 		par_override = 1;
1121 
1122 	omap2_mcspi_set_enable(spi, 0);
1123 
1124 	if (gpio_is_valid(spi->cs_gpio))
1125 		omap2_mcspi_set_cs(spi, spi->mode & SPI_CS_HIGH);
1126 
1127 	if (par_override ||
1128 	    (t->speed_hz != spi->max_speed_hz) ||
1129 	    (t->bits_per_word != spi->bits_per_word)) {
1130 		par_override = 1;
1131 		status = omap2_mcspi_setup_transfer(spi, t);
1132 		if (status < 0)
1133 			goto out;
1134 		if (t->speed_hz == spi->max_speed_hz &&
1135 		    t->bits_per_word == spi->bits_per_word)
1136 			par_override = 0;
1137 	}
1138 	if (cd && cd->cs_per_word) {
1139 		chconf = mcspi->ctx.modulctrl;
1140 		chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
1141 		mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
1142 		mcspi->ctx.modulctrl =
1143 			mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
1144 	}
1145 
1146 	chconf = mcspi_cached_chconf0(spi);
1147 	chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
1148 	chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
1149 
1150 	if (t->tx_buf == NULL)
1151 		chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
1152 	else if (t->rx_buf == NULL)
1153 		chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
1154 
1155 	if (cd && cd->turbo_mode && t->tx_buf == NULL) {
1156 		/* Turbo mode is for more than one word */
1157 		if (t->len > ((cs->word_len + 7) >> 3))
1158 			chconf |= OMAP2_MCSPI_CHCONF_TURBO;
1159 	}
1160 
1161 	mcspi_write_chconf0(spi, chconf);
1162 
1163 	if (t->len) {
1164 		unsigned	count;
1165 
1166 		if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
1167 		    (t->len >= DMA_MIN_BYTES))
1168 			omap2_mcspi_set_fifo(spi, t, 1);
1169 
1170 		omap2_mcspi_set_enable(spi, 1);
1171 
1172 		/* RX_ONLY mode needs dummy data in TX reg */
1173 		if (t->tx_buf == NULL)
1174 			writel_relaxed(0, cs->base
1175 					+ OMAP2_MCSPI_TX0);
1176 
1177 		if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
1178 		    (t->len >= DMA_MIN_BYTES))
1179 			count = omap2_mcspi_txrx_dma(spi, t);
1180 		else
1181 			count = omap2_mcspi_txrx_pio(spi, t);
1182 
1183 		if (count != t->len) {
1184 			status = -EIO;
1185 			goto out;
1186 		}
1187 	}
1188 
1189 	omap2_mcspi_set_enable(spi, 0);
1190 
1191 	if (mcspi->fifo_depth > 0)
1192 		omap2_mcspi_set_fifo(spi, t, 0);
1193 
1194 out:
1195 	/* Restore defaults if they were overridden */
1196 	if (par_override) {
1197 		par_override = 0;
1198 		status = omap2_mcspi_setup_transfer(spi, NULL);
1199 	}
1200 
1201 	if (cd && cd->cs_per_word) {
1202 		chconf = mcspi->ctx.modulctrl;
1203 		chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
1204 		mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
1205 		mcspi->ctx.modulctrl =
1206 			mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
1207 	}
1208 
1209 	omap2_mcspi_set_enable(spi, 0);
1210 
1211 	if (gpio_is_valid(spi->cs_gpio))
1212 		omap2_mcspi_set_cs(spi, !(spi->mode & SPI_CS_HIGH));
1213 
1214 	if (mcspi->fifo_depth > 0 && t)
1215 		omap2_mcspi_set_fifo(spi, t, 0);
1216 
1217 	return status;
1218 }
1219 
1220 static int omap2_mcspi_prepare_message(struct spi_master *master,
1221 				       struct spi_message *msg)
1222 {
1223 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
1224 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1225 	struct omap2_mcspi_cs	*cs;
1226 
1227 	/* Only a single channel can have the FORCE bit enabled
1228 	 * in its chconf0 register.
1229 	 * Scan all channels and disable them except the current one.
1230 	 * A FORCE may be left over from a previous transfer that had cs_change enabled.
1231 	 */
1232 	list_for_each_entry(cs, &ctx->cs, node) {
1233 		if (msg->spi->controller_state == cs)
1234 			continue;
1235 
1236 		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE)) {
1237 			cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
1238 			writel_relaxed(cs->chconf0,
1239 					cs->base + OMAP2_MCSPI_CHCONF0);
1240 			readl_relaxed(cs->base + OMAP2_MCSPI_CHCONF0);
1241 		}
1242 	}
1243 
1244 	return 0;
1245 }
1246 
1247 static int omap2_mcspi_transfer_one(struct spi_master *master,
1248 		struct spi_device *spi, struct spi_transfer *t)
1249 {
1250 	struct omap2_mcspi	*mcspi;
1251 	struct omap2_mcspi_dma	*mcspi_dma;
1252 	const void	*tx_buf = t->tx_buf;
1253 	void		*rx_buf = t->rx_buf;
1254 	unsigned	len = t->len;
1255 
1256 	mcspi = spi_master_get_devdata(master);
1257 	mcspi_dma = mcspi->dma_channels + spi->chip_select;
1258 
1259 	if ((len && !(rx_buf || tx_buf))) {
1260 		dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
1261 				t->speed_hz,
1262 				len,
1263 				tx_buf ? "tx" : "",
1264 				rx_buf ? "rx" : "",
1265 				t->bits_per_word);
1266 		return -EINVAL;
1267 	}
1268 
1269 	if (len < DMA_MIN_BYTES)
1270 		goto skip_dma_map;
1271 
1272 	if (mcspi_dma->dma_tx && tx_buf != NULL) {
1273 		t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
1274 				len, DMA_TO_DEVICE);
1275 		if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
1276 			dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1277 					'T', len);
1278 			return -EINVAL;
1279 		}
1280 	}
1281 	if (mcspi_dma->dma_rx && rx_buf != NULL) {
1282 		t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
1283 				DMA_FROM_DEVICE);
1284 		if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
1285 			dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1286 					'R', len);
1287 			if (tx_buf != NULL)
1288 				dma_unmap_single(mcspi->dev, t->tx_dma,
1289 						len, DMA_TO_DEVICE);
1290 			return -EINVAL;
1291 		}
1292 	}
1293 
1294 skip_dma_map:
1295 	return omap2_mcspi_work_one(mcspi, spi, t);
1296 }
1297 
1298 static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
1299 {
1300 	struct spi_master	*master = mcspi->master;
1301 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1302 	int			ret = 0;
1303 
1304 	ret = pm_runtime_get_sync(mcspi->dev);
1305 	if (ret < 0)
1306 		return ret;
1307 
1308 	mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
1309 			OMAP2_MCSPI_WAKEUPENABLE_WKEN);
1310 	ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
1311 
1312 	omap2_mcspi_set_master_mode(master);
1313 	pm_runtime_mark_last_busy(mcspi->dev);
1314 	pm_runtime_put_autosuspend(mcspi->dev);
1315 	return 0;
1316 }
1317 
1318 static int omap_mcspi_runtime_resume(struct device *dev)
1319 {
1320 	struct omap2_mcspi	*mcspi;
1321 	struct spi_master	*master;
1322 
1323 	master = dev_get_drvdata(dev);
1324 	mcspi = spi_master_get_devdata(master);
1325 	omap2_mcspi_restore_ctx(mcspi);
1326 
1327 	return 0;
1328 }
1329 
1330 static struct omap2_mcspi_platform_config omap2_pdata = {
1331 	.regs_offset = 0,
1332 };
1333 
1334 static struct omap2_mcspi_platform_config omap4_pdata = {
1335 	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
1336 };
1337 
1338 static const struct of_device_id omap_mcspi_of_match[] = {
1339 	{
1340 		.compatible = "ti,omap2-mcspi",
1341 		.data = &omap2_pdata,
1342 	},
1343 	{
1344 		.compatible = "ti,omap4-mcspi",
1345 		.data = &omap4_pdata,
1346 	},
1347 	{ },
1348 };
1349 MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
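/*
 * A minimal, hypothetical device tree node matching the table above (the
 * compatible strings and the "ti,spi-num-cs" / "ti,pindir-d0-out-d1-in"
 * properties come from this driver's probe; the node name and unit address
 * are made up for illustration, and platform details such as reg/dma
 * resources are omitted):
 *
 *	mcspi1: spi@48098000 {
 *		compatible = "ti,omap2-mcspi";
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *		ti,spi-num-cs = <4>;
 *	};
 */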
1350 
1351 static int omap2_mcspi_probe(struct platform_device *pdev)
1352 {
1353 	struct spi_master	*master;
1354 	const struct omap2_mcspi_platform_config *pdata;
1355 	struct omap2_mcspi	*mcspi;
1356 	struct resource		*r;
1357 	int			status = 0, i;
1358 	u32			regs_offset = 0;
1359 	static int		bus_num = 1;
1360 	struct device_node	*node = pdev->dev.of_node;
1361 	const struct of_device_id *match;
1362 
1363 	master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
1364 	if (master == NULL) {
1365 		dev_dbg(&pdev->dev, "master allocation failed\n");
1366 		return -ENOMEM;
1367 	}
1368 
1369 	/* the spi->mode bits understood by this driver: */
1370 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1371 	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1372 	master->setup = omap2_mcspi_setup;
1373 	master->auto_runtime_pm = true;
1374 	master->prepare_message = omap2_mcspi_prepare_message;
1375 	master->transfer_one = omap2_mcspi_transfer_one;
1376 	master->set_cs = omap2_mcspi_set_cs;
1377 	master->cleanup = omap2_mcspi_cleanup;
1378 	master->dev.of_node = node;
1379 	master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
1380 	master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
1381 
1382 	platform_set_drvdata(pdev, master);
1383 
1384 	mcspi = spi_master_get_devdata(master);
1385 	mcspi->master = master;
1386 
1387 	match = of_match_device(omap_mcspi_of_match, &pdev->dev);
1388 	if (match) {
1389 		u32 num_cs = 1; /* default number of chipselect */
1390 		pdata = match->data;
1391 
1392 		of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
1393 		master->num_chipselect = num_cs;
1394 		master->bus_num = bus_num++;
1395 		if (of_get_property(node, "ti,pindir-d0-out-d1-in", NULL))
1396 			mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
1397 	} else {
1398 		pdata = dev_get_platdata(&pdev->dev);
1399 		master->num_chipselect = pdata->num_cs;
1400 		if (pdev->id != -1)
1401 			master->bus_num = pdev->id;
1402 		mcspi->pin_dir = pdata->pin_dir;
1403 	}
1404 	regs_offset = pdata->regs_offset;
1405 
1406 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1407 	if (r == NULL) {
1408 		status = -ENODEV;
1409 		goto free_master;
1410 	}
1411 
1412 	r->start += regs_offset;
1413 	r->end += regs_offset;
1414 	mcspi->phys = r->start;
1415 
1416 	mcspi->base = devm_ioremap_resource(&pdev->dev, r);
1417 	if (IS_ERR(mcspi->base)) {
1418 		status = PTR_ERR(mcspi->base);
1419 		goto free_master;
1420 	}
1421 
1422 	mcspi->dev = &pdev->dev;
1423 
1424 	INIT_LIST_HEAD(&mcspi->ctx.cs);
1425 
1426 	mcspi->dma_channels = devm_kcalloc(&pdev->dev, master->num_chipselect,
1427 					   sizeof(struct omap2_mcspi_dma),
1428 					   GFP_KERNEL);
1429 	if (mcspi->dma_channels == NULL) {
1430 		status = -ENOMEM;
1431 		goto free_master;
1432 	}
1433 
1434 	for (i = 0; i < master->num_chipselect; i++) {
1435 		char *dma_rx_ch_name = mcspi->dma_channels[i].dma_rx_ch_name;
1436 		char *dma_tx_ch_name = mcspi->dma_channels[i].dma_tx_ch_name;
1437 		struct resource *dma_res;
1438 
1439 		sprintf(dma_rx_ch_name, "rx%d", i);
1440 		if (!pdev->dev.of_node) {
1441 			dma_res =
1442 				platform_get_resource_byname(pdev,
1443 							     IORESOURCE_DMA,
1444 							     dma_rx_ch_name);
1445 			if (!dma_res) {
1446 				dev_dbg(&pdev->dev,
1447 					"cannot get DMA RX channel\n");
1448 				status = -ENODEV;
1449 				break;
1450 			}
1451 
1452 			mcspi->dma_channels[i].dma_rx_sync_dev =
1453 				dma_res->start;
1454 		}
1455 		sprintf(dma_tx_ch_name, "tx%d", i);
1456 		if (!pdev->dev.of_node) {
1457 			dma_res =
1458 				platform_get_resource_byname(pdev,
1459 							     IORESOURCE_DMA,
1460 							     dma_tx_ch_name);
1461 			if (!dma_res) {
1462 				dev_dbg(&pdev->dev,
1463 					"cannot get DMA TX channel\n");
1464 				status = -ENODEV;
1465 				break;
1466 			}
1467 
1468 			mcspi->dma_channels[i].dma_tx_sync_dev =
1469 				dma_res->start;
1470 		}
1471 	}
1472 
1473 	if (status < 0)
1474 		goto free_master;
1475 
1476 	pm_runtime_use_autosuspend(&pdev->dev);
1477 	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
1478 	pm_runtime_enable(&pdev->dev);
1479 
1480 	status = omap2_mcspi_master_setup(mcspi);
1481 	if (status < 0)
1482 		goto disable_pm;
1483 
1484 	status = devm_spi_register_master(&pdev->dev, master);
1485 	if (status < 0)
1486 		goto disable_pm;
1487 
1488 	return status;
1489 
1490 disable_pm:
1491 	pm_runtime_disable(&pdev->dev);
1492 free_master:
1493 	spi_master_put(master);
1494 	return status;
1495 }
1496 
1497 static int omap2_mcspi_remove(struct platform_device *pdev)
1498 {
1499 	struct spi_master *master = platform_get_drvdata(pdev);
1500 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
1501 
1502 	pm_runtime_put_sync(mcspi->dev);
1503 	pm_runtime_disable(&pdev->dev);
1504 
1505 	return 0;
1506 }
1507 
1508 /* work with hotplug and coldplug */
1509 MODULE_ALIAS("platform:omap2_mcspi");
1510 
1511 #ifdef	CONFIG_SUSPEND
1512 /*
1513  * When the SPI wakes up from off-mode, CS is in the active state. If it was
1514  * in the inactive state when the driver was suspended, force it back to the
1515  * inactive state at wake-up.
1516  */
1517 static int omap2_mcspi_resume(struct device *dev)
1518 {
1519 	struct spi_master	*master = dev_get_drvdata(dev);
1520 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
1521 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1522 	struct omap2_mcspi_cs	*cs;
1523 
1524 	pm_runtime_get_sync(mcspi->dev);
1525 	list_for_each_entry(cs, &ctx->cs, node) {
1526 		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
1527 			/*
1528 			 * We need to toggle the CS state for the OMAP to take
1529 			 * this change into account.
1530 			 */
1531 			cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
1532 			writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
1533 			cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
1534 			writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
1535 		}
1536 	}
1537 	pm_runtime_mark_last_busy(mcspi->dev);
1538 	pm_runtime_put_autosuspend(mcspi->dev);
1539 	return 0;
1540 }
1541 #else
1542 #define	omap2_mcspi_resume	NULL
1543 #endif
1544 
1545 static const struct dev_pm_ops omap2_mcspi_pm_ops = {
1546 	.resume = omap2_mcspi_resume,
1547 	.runtime_resume	= omap_mcspi_runtime_resume,
1548 };
1549 
1550 static struct platform_driver omap2_mcspi_driver = {
1551 	.driver = {
1552 		.name =		"omap2_mcspi",
1553 		.pm =		&omap2_mcspi_pm_ops,
1554 		.of_match_table = omap_mcspi_of_match,
1555 	},
1556 	.probe =	omap2_mcspi_probe,
1557 	.remove =	omap2_mcspi_remove,
1558 };
1559 
1560 module_platform_driver(omap2_mcspi_driver);
1561 MODULE_LICENSE("GPL");
1562