xref: /openbmc/linux/drivers/spi/spi-s3c64xx.c (revision 206204a1)
1 /*
2  * Copyright (C) 2009 Samsung Electronics Ltd.
3  *	Jaswinder Singh <jassi.brar@samsung.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18  */
19 
20 #include <linux/init.h>
21 #include <linux/module.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <linux/clk.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/dmaengine.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/spi/spi.h>
30 #include <linux/gpio.h>
31 #include <linux/of.h>
32 #include <linux/of_gpio.h>
33 
34 #include <linux/platform_data/spi-s3c64xx.h>
35 
36 #define MAX_SPI_PORTS		3
37 #define S3C64XX_SPI_QUIRK_POLL		(1 << 0)
38 
39 /* Registers and bit-fields */
40 
41 #define S3C64XX_SPI_CH_CFG		0x00
42 #define S3C64XX_SPI_CLK_CFG		0x04
43 #define S3C64XX_SPI_MODE_CFG	0x08
44 #define S3C64XX_SPI_SLAVE_SEL	0x0C
45 #define S3C64XX_SPI_INT_EN		0x10
46 #define S3C64XX_SPI_STATUS		0x14
47 #define S3C64XX_SPI_TX_DATA		0x18
48 #define S3C64XX_SPI_RX_DATA		0x1C
49 #define S3C64XX_SPI_PACKET_CNT	0x20
50 #define S3C64XX_SPI_PENDING_CLR	0x24
51 #define S3C64XX_SPI_SWAP_CFG	0x28
52 #define S3C64XX_SPI_FB_CLK		0x2C
53 
54 #define S3C64XX_SPI_CH_HS_EN		(1<<6)	/* High Speed Enable */
55 #define S3C64XX_SPI_CH_SW_RST		(1<<5)
56 #define S3C64XX_SPI_CH_SLAVE		(1<<4)
57 #define S3C64XX_SPI_CPOL_L		(1<<3)
58 #define S3C64XX_SPI_CPHA_B		(1<<2)
59 #define S3C64XX_SPI_CH_RXCH_ON		(1<<1)
60 #define S3C64XX_SPI_CH_TXCH_ON		(1<<0)
61 
62 #define S3C64XX_SPI_CLKSEL_SRCMSK	(3<<9)
63 #define S3C64XX_SPI_CLKSEL_SRCSHFT	9
64 #define S3C64XX_SPI_ENCLK_ENABLE	(1<<8)
65 #define S3C64XX_SPI_PSR_MASK		0xff
66 
67 #define S3C64XX_SPI_MODE_CH_TSZ_BYTE		(0<<29)
68 #define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD	(1<<29)
69 #define S3C64XX_SPI_MODE_CH_TSZ_WORD		(2<<29)
70 #define S3C64XX_SPI_MODE_CH_TSZ_MASK		(3<<29)
71 #define S3C64XX_SPI_MODE_BUS_TSZ_BYTE		(0<<17)
72 #define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD	(1<<17)
73 #define S3C64XX_SPI_MODE_BUS_TSZ_WORD		(2<<17)
74 #define S3C64XX_SPI_MODE_BUS_TSZ_MASK		(3<<17)
75 #define S3C64XX_SPI_MODE_RXDMA_ON		(1<<2)
76 #define S3C64XX_SPI_MODE_TXDMA_ON		(1<<1)
77 #define S3C64XX_SPI_MODE_4BURST			(1<<0)
78 
79 #define S3C64XX_SPI_SLAVE_AUTO			(1<<1)
80 #define S3C64XX_SPI_SLAVE_SIG_INACT		(1<<0)
81 
82 #define S3C64XX_SPI_INT_TRAILING_EN		(1<<6)
83 #define S3C64XX_SPI_INT_RX_OVERRUN_EN		(1<<5)
84 #define S3C64XX_SPI_INT_RX_UNDERRUN_EN		(1<<4)
85 #define S3C64XX_SPI_INT_TX_OVERRUN_EN		(1<<3)
86 #define S3C64XX_SPI_INT_TX_UNDERRUN_EN		(1<<2)
87 #define S3C64XX_SPI_INT_RX_FIFORDY_EN		(1<<1)
88 #define S3C64XX_SPI_INT_TX_FIFORDY_EN		(1<<0)
89 
90 #define S3C64XX_SPI_ST_RX_OVERRUN_ERR		(1<<5)
91 #define S3C64XX_SPI_ST_RX_UNDERRUN_ERR	(1<<4)
92 #define S3C64XX_SPI_ST_TX_OVERRUN_ERR		(1<<3)
93 #define S3C64XX_SPI_ST_TX_UNDERRUN_ERR	(1<<2)
94 #define S3C64XX_SPI_ST_RX_FIFORDY		(1<<1)
95 #define S3C64XX_SPI_ST_TX_FIFORDY		(1<<0)
96 
97 #define S3C64XX_SPI_PACKET_CNT_EN		(1<<16)
98 
99 #define S3C64XX_SPI_PND_TX_UNDERRUN_CLR		(1<<4)
100 #define S3C64XX_SPI_PND_TX_OVERRUN_CLR		(1<<3)
101 #define S3C64XX_SPI_PND_RX_UNDERRUN_CLR		(1<<2)
102 #define S3C64XX_SPI_PND_RX_OVERRUN_CLR		(1<<1)
103 #define S3C64XX_SPI_PND_TRAILING_CLR		(1<<0)
104 
105 #define S3C64XX_SPI_SWAP_RX_HALF_WORD		(1<<7)
106 #define S3C64XX_SPI_SWAP_RX_BYTE		(1<<6)
107 #define S3C64XX_SPI_SWAP_RX_BIT			(1<<5)
108 #define S3C64XX_SPI_SWAP_RX_EN			(1<<4)
109 #define S3C64XX_SPI_SWAP_TX_HALF_WORD		(1<<3)
110 #define S3C64XX_SPI_SWAP_TX_BYTE		(1<<2)
111 #define S3C64XX_SPI_SWAP_TX_BIT			(1<<1)
112 #define S3C64XX_SPI_SWAP_TX_EN			(1<<0)
113 
114 #define S3C64XX_SPI_FBCLK_MSK		(3<<0)
115 
116 #define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
117 #define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
118 				(1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
119 #define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
120 #define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
121 					FIFO_LVL_MASK(i))
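/*
 * For example, on a port configured with fifo_lvl_mask = 0x1ff and
 * rx_lvl_offset = 15 (as in the Exynos4 port config below), TX_FIFO_LVL
 * reads bits [14:6] of SPI_STATUS and RX_FIFO_LVL reads bits [23:15].
 */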
122 
123 #define S3C64XX_SPI_MAX_TRAILCNT	0x3ff
124 #define S3C64XX_SPI_TRAILCNT_OFF	19
125 
126 #define S3C64XX_SPI_TRAILCNT		S3C64XX_SPI_MAX_TRAILCNT
127 
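/*
 * Rough millisecond-to-busy-wait conversion: loops_per_jiffy * HZ is
 * approximately the number of calibration loops per second, so dividing by
 * 1000 and scaling by 't' yields a loop count for 't' milliseconds. This is
 * only an order-of-magnitude bound for the polling loops below.
 */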
128 #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
129 #define is_polling(x)	(x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
130 
131 #define RXBUSY    (1<<2)
132 #define TXBUSY    (1<<3)
133 
134 struct s3c64xx_spi_dma_data {
135 	struct dma_chan *ch;
136 	enum dma_transfer_direction direction;
137 	unsigned int dmach;
138 };
139 
140 /**
141  * struct s3c64xx_spi_info - SPI Controller hardware info
142  * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
143  * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
144  * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
145  * @high_speed: True, if the controller supports the HIGH_SPEED_EN bit.
146  * @clk_from_cmu: True, if the controller does not include a clock mux and
147  *	prescaler unit.
148  *
149  * The Samsung s3c64xx SPI controllers are used on various Samsung SoCs and
150  * differ in some aspects, such as the FIFO size and the SPI bus clock setup.
151  * Such differences are specified to the driver via this structure, which is
152  * provided as driver data.
153  */
154 struct s3c64xx_spi_port_config {
155 	int	fifo_lvl_mask[MAX_SPI_PORTS];
156 	int	rx_lvl_offset;
157 	int	tx_st_done;
158 	int	quirks;
159 	bool	high_speed;
160 	bool	clk_from_cmu;
161 };
162 
163 /**
164  * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
165  * @clk: Pointer to the spi clock.
166  * @src_clk: Pointer to the clock used to generate SPI signals.
167  * @master: Pointer to the SPI Protocol master.
168  * @cntrlr_info: Platform specific data for the controller this driver manages.
169  * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
170  * @lock: Controller specific lock.
171  * @state: Set of FLAGS to indicate status.
172  * @rx_dma: DMA data (channel, direction) for Rx.
173  * @tx_dma: DMA data (channel, direction) for Tx.
174  * @sfr_start: BUS address of SPI controller regs.
175  * @regs: Pointer to ioremap'ed controller registers.
176  * @pdev: Pointer to the owning platform device.
177  * @xfer_completion: To indicate completion of xfer task.
178  * @cur_mode: Stores the active configuration of the controller.
179  * @cur_bpw: Stores the active bits per word settings.
180  * @cur_speed: Stores the active xfer clock speed.
181  */
182 struct s3c64xx_spi_driver_data {
183 	void __iomem                    *regs;
184 	struct clk                      *clk;
185 	struct clk                      *src_clk;
186 	struct platform_device          *pdev;
187 	struct spi_master               *master;
188 	struct s3c64xx_spi_info  *cntrlr_info;
189 	struct spi_device               *tgl_spi;
190 	spinlock_t                      lock;
191 	unsigned long                   sfr_start;
192 	struct completion               xfer_completion;
193 	unsigned                        state;
194 	unsigned                        cur_mode, cur_bpw;
195 	unsigned                        cur_speed;
196 	struct s3c64xx_spi_dma_data	rx_dma;
197 	struct s3c64xx_spi_dma_data	tx_dma;
198 	struct s3c64xx_spi_port_config	*port_conf;
199 	unsigned int			port_id;
200 	bool				cs_gpio;
201 };
202 
203 static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
204 {
205 	void __iomem *regs = sdd->regs;
206 	unsigned long loops;
207 	u32 val;
208 
209 	writel(0, regs + S3C64XX_SPI_PACKET_CNT);
210 
211 	val = readl(regs + S3C64XX_SPI_CH_CFG);
212 	val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
213 	writel(val, regs + S3C64XX_SPI_CH_CFG);
214 
215 	val = readl(regs + S3C64XX_SPI_CH_CFG);
216 	val |= S3C64XX_SPI_CH_SW_RST;
217 	val &= ~S3C64XX_SPI_CH_HS_EN;
218 	writel(val, regs + S3C64XX_SPI_CH_CFG);
219 
220 	/* Flush TxFIFO */
221 	loops = msecs_to_loops(1);
222 	do {
223 		val = readl(regs + S3C64XX_SPI_STATUS);
224 	} while (TX_FIFO_LVL(val, sdd) && loops--);
225 
226 	if (loops == 0)
227 		dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
228 
229 	/* Flush RxFIFO */
230 	loops = msecs_to_loops(1);
231 	do {
232 		val = readl(regs + S3C64XX_SPI_STATUS);
233 		if (RX_FIFO_LVL(val, sdd))
234 			readl(regs + S3C64XX_SPI_RX_DATA);
235 		else
236 			break;
237 	} while (loops--);
238 
239 	if (loops == 0)
240 		dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
241 
242 	val = readl(regs + S3C64XX_SPI_CH_CFG);
243 	val &= ~S3C64XX_SPI_CH_SW_RST;
244 	writel(val, regs + S3C64XX_SPI_CH_CFG);
245 
246 	val = readl(regs + S3C64XX_SPI_MODE_CFG);
247 	val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
248 	writel(val, regs + S3C64XX_SPI_MODE_CFG);
249 }
250 
251 static void s3c64xx_spi_dmacb(void *data)
252 {
253 	struct s3c64xx_spi_driver_data *sdd;
254 	struct s3c64xx_spi_dma_data *dma = data;
255 	unsigned long flags;
256 
257 	if (dma->direction == DMA_DEV_TO_MEM)
258 		sdd = container_of(data,
259 			struct s3c64xx_spi_driver_data, rx_dma);
260 	else
261 		sdd = container_of(data,
262 			struct s3c64xx_spi_driver_data, tx_dma);
263 
264 	spin_lock_irqsave(&sdd->lock, flags);
265 
266 	if (dma->direction == DMA_DEV_TO_MEM) {
267 		sdd->state &= ~RXBUSY;
268 		if (!(sdd->state & TXBUSY))
269 			complete(&sdd->xfer_completion);
270 	} else {
271 		sdd->state &= ~TXBUSY;
272 		if (!(sdd->state & RXBUSY))
273 			complete(&sdd->xfer_completion);
274 	}
275 
276 	spin_unlock_irqrestore(&sdd->lock, flags);
277 }
278 
279 static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
280 			struct sg_table *sgt)
281 {
282 	struct s3c64xx_spi_driver_data *sdd;
283 	struct dma_slave_config config;
284 	struct dma_async_tx_descriptor *desc;
285 
286 	memset(&config, 0, sizeof(config));
287 
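	/*
	 * The slave address width tracks the current bits-per-word setting
	 * (cur_bpw / 8 bytes), so DMA accesses match the FIFO word size
	 * programmed in s3c64xx_spi_config().
	 */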
288 	if (dma->direction == DMA_DEV_TO_MEM) {
289 		sdd = container_of((void *)dma,
290 			struct s3c64xx_spi_driver_data, rx_dma);
291 		config.direction = dma->direction;
292 		config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
293 		config.src_addr_width = sdd->cur_bpw / 8;
294 		config.src_maxburst = 1;
295 		dmaengine_slave_config(dma->ch, &config);
296 	} else {
297 		sdd = container_of((void *)dma,
298 			struct s3c64xx_spi_driver_data, tx_dma);
299 		config.direction = dma->direction;
300 		config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
301 		config.dst_addr_width = sdd->cur_bpw / 8;
302 		config.dst_maxburst = 1;
303 		dmaengine_slave_config(dma->ch, &config);
304 	}
305 
306 	desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
307 				       dma->direction, DMA_PREP_INTERRUPT);
308 
309 	desc->callback = s3c64xx_spi_dmacb;
310 	desc->callback_param = dma;
311 
312 	dmaengine_submit(desc);
313 	dma_async_issue_pending(dma->ch);
314 }
315 
316 static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
317 {
318 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
319 	dma_filter_fn filter = sdd->cntrlr_info->filter;
320 	struct device *dev = &sdd->pdev->dev;
321 	dma_cap_mask_t mask;
322 	int ret;
323 
324 	if (!is_polling(sdd)) {
325 		dma_cap_zero(mask);
326 		dma_cap_set(DMA_SLAVE, mask);
327 
328 		/* Acquire DMA channels */
329 		sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
330 				   (void *)sdd->rx_dma.dmach, dev, "rx");
331 		if (!sdd->rx_dma.ch) {
332 			dev_err(dev, "Failed to get RX DMA channel\n");
333 			ret = -EBUSY;
334 			goto out;
335 		}
336 		spi->dma_rx = sdd->rx_dma.ch;
337 
338 		sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
339 				   (void *)sdd->tx_dma.dmach, dev, "tx");
340 		if (!sdd->tx_dma.ch) {
341 			dev_err(dev, "Failed to get TX DMA channel\n");
342 			ret = -EBUSY;
343 			goto out_rx;
344 		}
345 		spi->dma_tx = sdd->tx_dma.ch;
346 	}
347 
348 	ret = pm_runtime_get_sync(&sdd->pdev->dev);
349 	if (ret < 0) {
350 		dev_err(dev, "Failed to enable device: %d\n", ret);
351 		goto out_tx;
352 	}
353 
354 	return 0;
355 
356 out_tx:
357 	dma_release_channel(sdd->tx_dma.ch);
358 out_rx:
359 	dma_release_channel(sdd->rx_dma.ch);
360 out:
361 	return ret;
362 }
363 
364 static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
365 {
366 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
367 
368 	/* Free DMA channels */
369 	if (!is_polling(sdd)) {
370 		dma_release_channel(sdd->rx_dma.ch);
371 		dma_release_channel(sdd->tx_dma.ch);
372 	}
373 
374 	pm_runtime_put(&sdd->pdev->dev);
375 	return 0;
376 }
377 
378 static bool s3c64xx_spi_can_dma(struct spi_master *master,
379 				struct spi_device *spi,
380 				struct spi_transfer *xfer)
381 {
382 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
383 
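	/*
	 * (FIFO_LVL_MASK >> 1) + 1 is the usable FIFO depth in words for this
	 * port; anything that fits entirely in the FIFO is handled by PIO, so
	 * DMA is only worthwhile for larger transfers.
	 */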
384 	return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
385 }
386 
387 static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
388 				struct spi_device *spi,
389 				struct spi_transfer *xfer, int dma_mode)
390 {
391 	void __iomem *regs = sdd->regs;
392 	u32 modecfg, chcfg;
393 
394 	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
395 	modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
396 
397 	chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
398 	chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
399 
400 	if (dma_mode) {
401 		chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
402 	} else {
403 		/* Always shift data into the RX FIFO, even if the xfer is
404 		 * Tx only; this lets the PCKT_CNT value generate exactly as
405 		 * many clocks as are needed.
406 		 */
407 		chcfg |= S3C64XX_SPI_CH_RXCH_ON;
408 		writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
409 					| S3C64XX_SPI_PACKET_CNT_EN,
410 					regs + S3C64XX_SPI_PACKET_CNT);
411 	}
412 
413 	if (xfer->tx_buf != NULL) {
414 		sdd->state |= TXBUSY;
415 		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
416 		if (dma_mode) {
417 			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
418 			prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
419 		} else {
420 			switch (sdd->cur_bpw) {
421 			case 32:
422 				iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
423 					xfer->tx_buf, xfer->len / 4);
424 				break;
425 			case 16:
426 				iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
427 					xfer->tx_buf, xfer->len / 2);
428 				break;
429 			default:
430 				iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
431 					xfer->tx_buf, xfer->len);
432 				break;
433 			}
434 		}
435 	}
436 
437 	if (xfer->rx_buf != NULL) {
438 		sdd->state |= RXBUSY;
439 
440 		if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
441 					&& !(sdd->cur_mode & SPI_CPHA))
442 			chcfg |= S3C64XX_SPI_CH_HS_EN;
443 
444 		if (dma_mode) {
445 			modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
446 			chcfg |= S3C64XX_SPI_CH_RXCH_ON;
447 			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
448 					| S3C64XX_SPI_PACKET_CNT_EN,
449 					regs + S3C64XX_SPI_PACKET_CNT);
450 			prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
451 		}
452 	}
453 
454 	writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
455 	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
456 }
457 
458 static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
459 					int timeout_ms)
460 {
461 	void __iomem *regs = sdd->regs;
462 	unsigned long val = 1;
463 	u32 status;
464 
465 	/* max fifo depth available */
466 	u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
467 
468 	if (timeout_ms)
469 		val = msecs_to_loops(timeout_ms);
470 
471 	do {
472 		status = readl(regs + S3C64XX_SPI_STATUS);
473 	} while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);
474 
475 	/* return the actual received data length */
476 	return RX_FIFO_LVL(status, sdd);
477 }
478 
479 static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
480 			struct spi_transfer *xfer)
481 {
482 	void __iomem *regs = sdd->regs;
483 	unsigned long val;
484 	u32 status;
485 	int ms;
486 
487 	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
488 	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
489 	ms += 10; /* some tolerance */
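	/*
	 * Illustrative example: a 1024-byte transfer at 1 MHz needs
	 * 1024 * 8 * 1000 / 1000000 = 8 ms on the wire, plus the 10 ms of
	 * slack above.
	 */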
490 
491 	val = msecs_to_jiffies(ms) + 10;
492 	val = wait_for_completion_timeout(&sdd->xfer_completion, val);
493 
494 	/*
495 	 * If the previous xfer completed within the timeout, proceed;
496 	 * otherwise return -EIO.
497 	 * DMA Tx completes after merely writing the data into the FIFO,
498 	 * without waiting for the actual transmission on the bus to finish.
499 	 * DMA Rx completes only after the DMA has read the data out of the
500 	 * FIFO, which requires the bus transmission to finish, so no extra
501 	 * wait is needed when the xfer involves Rx (with or without Tx).
502 	 */
503 	if (val && !xfer->rx_buf) {
504 		val = msecs_to_loops(10);
505 		status = readl(regs + S3C64XX_SPI_STATUS);
506 		while ((TX_FIFO_LVL(status, sdd)
507 			|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
508 		       && --val) {
509 			cpu_relax();
510 			status = readl(regs + S3C64XX_SPI_STATUS);
511 		}
512 
513 	}
514 
515 	/* If we timed out while checking rx/tx status, return an error */
516 	if (!val)
517 		return -EIO;
518 
519 	return 0;
520 }
521 
522 static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
523 			struct spi_transfer *xfer)
524 {
525 	void __iomem *regs = sdd->regs;
526 	unsigned long val;
527 	u32 status;
528 	int loops;
529 	u32 cpy_len;
530 	u8 *buf;
531 	int ms;
532 
533 	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
534 	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
535 	ms += 10; /* some tolerance */
536 
537 	val = msecs_to_loops(ms);
538 	do {
539 		status = readl(regs + S3C64XX_SPI_STATUS);
540 	} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
541 
542 
543 	/* If it was only Tx */
544 	if (!xfer->rx_buf) {
545 		sdd->state &= ~TXBUSY;
546 		return 0;
547 	}
548 
549 	/*
550 	 * If the receive length is bigger than the controller FIFO
551 	 * size, compute the number of loops and read the FIFO that
552 	 * many times: loops = length / max FIFO size (derived from
553 	 * the FIFO mask).
554 	 * For any length up to the FIFO size, the code below runs
555 	 * at least once.
556 	 */
557 	loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
558 	buf = xfer->rx_buf;
559 	do {
560 		/* wait for data to be received in the fifo */
561 		cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
562 						       (loops ? ms : 0));
563 
564 		switch (sdd->cur_bpw) {
565 		case 32:
566 			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
567 				     buf, cpy_len / 4);
568 			break;
569 		case 16:
570 			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
571 				     buf, cpy_len / 2);
572 			break;
573 		default:
574 			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
575 				    buf, cpy_len);
576 			break;
577 		}
578 
579 		buf = buf + cpy_len;
580 	} while (loops--);
581 	sdd->state &= ~RXBUSY;
582 
583 	return 0;
584 }
585 
586 static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
587 {
588 	void __iomem *regs = sdd->regs;
589 	u32 val;
590 
591 	/* Disable Clock */
592 	if (sdd->port_conf->clk_from_cmu) {
593 		clk_disable_unprepare(sdd->src_clk);
594 	} else {
595 		val = readl(regs + S3C64XX_SPI_CLK_CFG);
596 		val &= ~S3C64XX_SPI_ENCLK_ENABLE;
597 		writel(val, regs + S3C64XX_SPI_CLK_CFG);
598 	}
599 
600 	/* Set Polarity and Phase */
601 	val = readl(regs + S3C64XX_SPI_CH_CFG);
602 	val &= ~(S3C64XX_SPI_CH_SLAVE |
603 			S3C64XX_SPI_CPOL_L |
604 			S3C64XX_SPI_CPHA_B);
605 
606 	if (sdd->cur_mode & SPI_CPOL)
607 		val |= S3C64XX_SPI_CPOL_L;
608 
609 	if (sdd->cur_mode & SPI_CPHA)
610 		val |= S3C64XX_SPI_CPHA_B;
611 
612 	writel(val, regs + S3C64XX_SPI_CH_CFG);
613 
614 	/* Set Channel & DMA Mode */
615 	val = readl(regs + S3C64XX_SPI_MODE_CFG);
616 	val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
617 			| S3C64XX_SPI_MODE_CH_TSZ_MASK);
618 
619 	switch (sdd->cur_bpw) {
620 	case 32:
621 		val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
622 		val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
623 		break;
624 	case 16:
625 		val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
626 		val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
627 		break;
628 	default:
629 		val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
630 		val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
631 		break;
632 	}
633 
634 	writel(val, regs + S3C64XX_SPI_MODE_CFG);
635 
636 	if (sdd->port_conf->clk_from_cmu) {
637 		/* Configure Clock */
638 		/* There is a fixed divide-by-two before the SPI, so request twice the rate */
639 		clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
640 		/* Enable Clock */
641 		clk_prepare_enable(sdd->src_clk);
642 	} else {
643 		/* Configure Clock */
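		/*
		 * Worked example (illustrative): with a 100 MHz source clock
		 * and cur_speed = 10 MHz, PSR = 100000000 / 10000000 / 2 - 1
		 * = 4, giving an SPI clock of 100000000 / (2 * (4 + 1)) =
		 * 10 MHz.
		 */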
644 		val = readl(regs + S3C64XX_SPI_CLK_CFG);
645 		val &= ~S3C64XX_SPI_PSR_MASK;
646 		val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
647 				& S3C64XX_SPI_PSR_MASK);
648 		writel(val, regs + S3C64XX_SPI_CLK_CFG);
649 
650 		/* Enable Clock */
651 		val = readl(regs + S3C64XX_SPI_CLK_CFG);
652 		val |= S3C64XX_SPI_ENCLK_ENABLE;
653 		writel(val, regs + S3C64XX_SPI_CLK_CFG);
654 	}
655 }
656 
657 #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
658 
659 static int s3c64xx_spi_prepare_message(struct spi_master *master,
660 				       struct spi_message *msg)
661 {
662 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
663 	struct spi_device *spi = msg->spi;
664 	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
665 
666 	/* If the Master's (controller) state differs from that needed by the Slave */
667 	if (sdd->cur_speed != spi->max_speed_hz
668 			|| sdd->cur_mode != spi->mode
669 			|| sdd->cur_bpw != spi->bits_per_word) {
670 		sdd->cur_bpw = spi->bits_per_word;
671 		sdd->cur_speed = spi->max_speed_hz;
672 		sdd->cur_mode = spi->mode;
673 		s3c64xx_spi_config(sdd);
674 	}
675 
676 	/* Configure feedback delay */
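	/* (FB_CLK_SEL is a 2-bit field, per S3C64XX_SPI_FBCLK_MSK, hence the & 0x3 below) */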
677 	writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
678 
679 	return 0;
680 }
681 
682 static int s3c64xx_spi_transfer_one(struct spi_master *master,
683 				    struct spi_device *spi,
684 				    struct spi_transfer *xfer)
685 {
686 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
687 	int status;
688 	u32 speed;
689 	u8 bpw;
690 	unsigned long flags;
691 	int use_dma;
692 
693 	reinit_completion(&sdd->xfer_completion);
694 
695 	/* Only BPW and Speed may change across transfers */
696 	bpw = xfer->bits_per_word;
697 	speed = xfer->speed_hz ? : spi->max_speed_hz;
698 
699 	if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
700 		sdd->cur_bpw = bpw;
701 		sdd->cur_speed = speed;
702 		s3c64xx_spi_config(sdd);
703 	}
704 
705 	/* Use the polling method for xfers that are not bigger than the FIFO capacity */
706 	use_dma = 0;
707 	if (!is_polling(sdd) &&
708 	    (sdd->rx_dma.ch && sdd->tx_dma.ch &&
709 	     (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
710 		use_dma = 1;
711 
712 	spin_lock_irqsave(&sdd->lock, flags);
713 
714 	/* Clear the busy flags; enable_datapath() sets only those this xfer needs */
715 	sdd->state &= ~RXBUSY;
716 	sdd->state &= ~TXBUSY;
717 
718 	enable_datapath(sdd, spi, xfer, use_dma);
719 
720 	/* Start the signals */
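	/* (writing 0 clears SLAVE_SIG_INACT so the controller drives /CS active) */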
721 	writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
722 
723 	spin_unlock_irqrestore(&sdd->lock, flags);
724 
725 	if (use_dma)
726 		status = wait_for_dma(sdd, xfer);
727 	else
728 		status = wait_for_pio(sdd, xfer);
729 
730 	if (status) {
731 		dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
732 			xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
733 			(sdd->state & RXBUSY) ? 'f' : 'p',
734 			(sdd->state & TXBUSY) ? 'f' : 'p',
735 			xfer->len);
736 
737 		if (use_dma) {
738 			if (xfer->tx_buf != NULL
739 			    && (sdd->state & TXBUSY))
740 				dmaengine_terminate_all(sdd->tx_dma.ch);
741 			if (xfer->rx_buf != NULL
742 			    && (sdd->state & RXBUSY))
743 				dmaengine_terminate_all(sdd->rx_dma.ch);
744 		}
745 	} else {
746 		flush_fifo(sdd);
747 	}
748 
749 	return status;
750 }
751 
752 static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
753 				struct spi_device *spi)
754 {
755 	struct s3c64xx_spi_csinfo *cs;
756 	struct device_node *slave_np, *data_np = NULL;
757 	struct s3c64xx_spi_driver_data *sdd;
758 	u32 fb_delay = 0;
759 
760 	sdd = spi_master_get_devdata(spi->master);
761 	slave_np = spi->dev.of_node;
762 	if (!slave_np) {
763 		dev_err(&spi->dev, "device node not found\n");
764 		return ERR_PTR(-EINVAL);
765 	}
766 
767 	data_np = of_get_child_by_name(slave_np, "controller-data");
768 	if (!data_np) {
769 		dev_err(&spi->dev, "child node 'controller-data' not found\n");
770 		return ERR_PTR(-EINVAL);
771 	}
772 
773 	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
774 	if (!cs) {
775 		of_node_put(data_np);
776 		return ERR_PTR(-ENOMEM);
777 	}
778 
779 	/* The CS line is asserted/deasserted by the gpio pin */
780 	if (sdd->cs_gpio)
781 		cs->line = of_get_named_gpio(data_np, "cs-gpio", 0);
782 
783 	if (!gpio_is_valid(cs->line)) {
784 		dev_err(&spi->dev, "chip select gpio is not specified or invalid\n");
785 		kfree(cs);
786 		of_node_put(data_np);
787 		return ERR_PTR(-EINVAL);
788 	}
789 
790 	of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
791 	cs->fb_delay = fb_delay;
792 	of_node_put(data_np);
793 	return cs;
794 }
795 
796 /*
797  * Here we only check the validity of the requested configuration
798  * and save the configuration in a local data-structure.
799  * The controller is actually configured only just before we
800  * get a message to transfer.
801  */
802 static int s3c64xx_spi_setup(struct spi_device *spi)
803 {
804 	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
805 	struct s3c64xx_spi_driver_data *sdd;
806 	struct s3c64xx_spi_info *sci;
807 	int err;
808 
809 	sdd = spi_master_get_devdata(spi->master);
810 	if (!cs && spi->dev.of_node) {
811 		cs = s3c64xx_get_slave_ctrldata(spi);
812 		spi->controller_data = cs;
813 	}
814 
815 	if (IS_ERR_OR_NULL(cs)) {
816 		dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
817 		return -ENODEV;
818 	}
819 
820 	if (!spi_get_ctldata(spi)) {
821 		/* Request the gpio only if the CS line is driven by a gpio pin */
822 		if (sdd->cs_gpio) {
823 			err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH,
824 					dev_name(&spi->dev));
825 			if (err) {
826 				dev_err(&spi->dev,
827 					"Failed to get /CS gpio [%d]: %d\n",
828 					cs->line, err);
829 				goto err_gpio_req;
830 			}
831 
832 			spi->cs_gpio = cs->line;
833 		}
834 
835 		spi_set_ctldata(spi, cs);
836 	}
837 
838 	sci = sdd->cntrlr_info;
839 
840 	pm_runtime_get_sync(&sdd->pdev->dev);
841 
842 	/* Check if we can provide the requested rate */
843 	if (!sdd->port_conf->clk_from_cmu) {
844 		u32 psr, speed;
845 
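		/*
		 * The ceiling is the rate obtained with PSR = 0, i.e.
		 * src_clk / 2. The requested rate is clamped to that, a PSR
		 * is derived from it, and the PSR is then nudged so the
		 * achievable rate never exceeds what was requested.
		 */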
846 		/* Max possible */
847 		speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);
848 
849 		if (spi->max_speed_hz > speed)
850 			spi->max_speed_hz = speed;
851 
852 		psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
853 		psr &= S3C64XX_SPI_PSR_MASK;
854 		if (psr == S3C64XX_SPI_PSR_MASK)
855 			psr--;
856 
857 		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
858 		if (spi->max_speed_hz < speed) {
859 			if (psr+1 < S3C64XX_SPI_PSR_MASK) {
860 				psr++;
861 			} else {
862 				err = -EINVAL;
863 				goto setup_exit;
864 			}
865 		}
866 
867 		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
868 		if (spi->max_speed_hz >= speed) {
869 			spi->max_speed_hz = speed;
870 		} else {
871 			dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
872 				spi->max_speed_hz);
873 			err = -EINVAL;
874 			goto setup_exit;
875 		}
876 	}
877 
878 	pm_runtime_put(&sdd->pdev->dev);
879 	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
880 	return 0;
881 
882 setup_exit:
883 	pm_runtime_put(&sdd->pdev->dev);
884 	/* setup() returns with device de-selected */
885 	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
886 
887 	gpio_free(cs->line);
888 	spi_set_ctldata(spi, NULL);
889 
890 err_gpio_req:
891 	if (spi->dev.of_node)
892 		kfree(cs);
893 
894 	return err;
895 }
896 
897 static void s3c64xx_spi_cleanup(struct spi_device *spi)
898 {
899 	struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
900 	struct s3c64xx_spi_driver_data *sdd;
901 
902 	sdd = spi_master_get_devdata(spi->master);
903 	if (spi->cs_gpio) {
904 		gpio_free(spi->cs_gpio);
905 		if (spi->dev.of_node)
906 			kfree(cs);
907 	}
908 	spi_set_ctldata(spi, NULL);
909 }
910 
911 static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
912 {
913 	struct s3c64xx_spi_driver_data *sdd = data;
914 	struct spi_master *spi = sdd->master;
915 	unsigned int val, clr = 0;
916 
917 	val = readl(sdd->regs + S3C64XX_SPI_STATUS);
918 
919 	if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
920 		clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
921 		dev_err(&spi->dev, "RX overrun\n");
922 	}
923 	if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
924 		clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
925 		dev_err(&spi->dev, "RX underrun\n");
926 	}
927 	if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
928 		clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
929 		dev_err(&spi->dev, "TX overrun\n");
930 	}
931 	if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
932 		clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
933 		dev_err(&spi->dev, "TX underrun\n");
934 	}
935 
936 	/* Clear the pending irq by setting and then clearing it */
937 	writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
938 	writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
939 
940 	return IRQ_HANDLED;
941 }
942 
943 static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
944 {
945 	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
946 	void __iomem *regs = sdd->regs;
947 	unsigned int val;
948 
949 	sdd->cur_speed = 0;
950 
951 	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
952 
953 	/* Disable Interrupts - we use Polling if not DMA mode */
954 	writel(0, regs + S3C64XX_SPI_INT_EN);
955 
956 	if (!sdd->port_conf->clk_from_cmu)
957 		writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
958 				regs + S3C64XX_SPI_CLK_CFG);
959 	writel(0, regs + S3C64XX_SPI_MODE_CFG);
960 	writel(0, regs + S3C64XX_SPI_PACKET_CNT);
961 
962 	/* Clear any pending irq bits by setting and then clearing them */
963 	val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
964 		S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
965 		S3C64XX_SPI_PND_TX_OVERRUN_CLR |
966 		S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
967 	writel(val, regs + S3C64XX_SPI_PENDING_CLR);
968 	writel(0, regs + S3C64XX_SPI_PENDING_CLR);
969 
970 	writel(0, regs + S3C64XX_SPI_SWAP_CFG);
971 
972 	val = readl(regs + S3C64XX_SPI_MODE_CFG);
973 	val &= ~S3C64XX_SPI_MODE_4BURST;
974 	val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
975 	val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
976 	writel(val, regs + S3C64XX_SPI_MODE_CFG);
977 
978 	flush_fifo(sdd);
979 }
980 
981 #ifdef CONFIG_OF
982 static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
983 {
984 	struct s3c64xx_spi_info *sci;
985 	u32 temp;
986 
987 	sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
988 	if (!sci)
989 		return ERR_PTR(-ENOMEM);
990 
991 	if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
992 		dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
993 		sci->src_clk_nr = 0;
994 	} else {
995 		sci->src_clk_nr = temp;
996 	}
997 
998 	if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
999 		dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
1000 		sci->num_cs = 1;
1001 	} else {
1002 		sci->num_cs = temp;
1003 	}
1004 
1005 	return sci;
1006 }
1007 #else
1008 static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
1009 {
1010 	return dev_get_platdata(dev);
1011 }
1012 #endif
1013 
1014 static const struct of_device_id s3c64xx_spi_dt_match[];
1015 
1016 static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
1017 						struct platform_device *pdev)
1018 {
1019 #ifdef CONFIG_OF
1020 	if (pdev->dev.of_node) {
1021 		const struct of_device_id *match;
1022 		match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node);
1023 		return (struct s3c64xx_spi_port_config *)match->data;
1024 	}
1025 #endif
1026 	return (struct s3c64xx_spi_port_config *)
1027 			 platform_get_device_id(pdev)->driver_data;
1028 }
1029 
1030 static int s3c64xx_spi_probe(struct platform_device *pdev)
1031 {
1032 	struct resource	*mem_res;
1033 	struct resource	*res;
1034 	struct s3c64xx_spi_driver_data *sdd;
1035 	struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
1036 	struct spi_master *master;
1037 	int ret, irq;
1038 	char clk_name[16];
1039 
1040 	if (!sci && pdev->dev.of_node) {
1041 		sci = s3c64xx_spi_parse_dt(&pdev->dev);
1042 		if (IS_ERR(sci))
1043 			return PTR_ERR(sci);
1044 	}
1045 
1046 	if (!sci) {
1047 		dev_err(&pdev->dev, "platform_data missing!\n");
1048 		return -ENODEV;
1049 	}
1050 
1051 	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1052 	if (mem_res == NULL) {
1053 		dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
1054 		return -ENXIO;
1055 	}
1056 
1057 	irq = platform_get_irq(pdev, 0);
1058 	if (irq < 0) {
1059 		dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
1060 		return irq;
1061 	}
1062 
1063 	master = spi_alloc_master(&pdev->dev,
1064 				sizeof(struct s3c64xx_spi_driver_data));
1065 	if (master == NULL) {
1066 		dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
1067 		return -ENOMEM;
1068 	}
1069 
1070 	platform_set_drvdata(pdev, master);
1071 
1072 	sdd = spi_master_get_devdata(master);
1073 	sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
1074 	sdd->master = master;
1075 	sdd->cntrlr_info = sci;
1076 	sdd->pdev = pdev;
1077 	sdd->sfr_start = mem_res->start;
1078 	sdd->cs_gpio = true;
1079 	if (pdev->dev.of_node) {
1080 		if (!of_find_property(pdev->dev.of_node, "cs-gpio", NULL))
1081 			sdd->cs_gpio = false;
1082 
1083 		ret = of_alias_get_id(pdev->dev.of_node, "spi");
1084 		if (ret < 0) {
1085 			dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
1086 				ret);
1087 			goto err0;
1088 		}
1089 		sdd->port_id = ret;
1090 	} else {
1091 		sdd->port_id = pdev->id;
1092 	}
1093 
1094 	sdd->cur_bpw = 8;
1095 
1096 	if (!sdd->pdev->dev.of_node) {
1097 		res = platform_get_resource(pdev, IORESOURCE_DMA,  0);
1098 		if (!res) {
1099 			dev_warn(&pdev->dev, "Unable to get SPI tx dma resource. Switching to poll mode\n");
1100 			sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
1101 		} else
1102 			sdd->tx_dma.dmach = res->start;
1103 
1104 		res = platform_get_resource(pdev, IORESOURCE_DMA,  1);
1105 		if (!res) {
1106 			dev_warn(&pdev->dev, "Unable to get SPI rx dma resource. Switching to poll mode\n");
1107 			sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
1108 		} else
1109 			sdd->rx_dma.dmach = res->start;
1110 	}
1111 
1112 	sdd->tx_dma.direction = DMA_MEM_TO_DEV;
1113 	sdd->rx_dma.direction = DMA_DEV_TO_MEM;
1114 
1115 	master->dev.of_node = pdev->dev.of_node;
1116 	master->bus_num = sdd->port_id;
1117 	master->setup = s3c64xx_spi_setup;
1118 	master->cleanup = s3c64xx_spi_cleanup;
1119 	master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
1120 	master->prepare_message = s3c64xx_spi_prepare_message;
1121 	master->transfer_one = s3c64xx_spi_transfer_one;
1122 	master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
1123 	master->num_chipselect = sci->num_cs;
1124 	master->dma_alignment = 8;
1125 	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
1126 					SPI_BPW_MASK(8);
1127 	/* the spi->mode bits understood by this driver: */
1128 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1129 	master->auto_runtime_pm = true;
1130 	if (!is_polling(sdd))
1131 		master->can_dma = s3c64xx_spi_can_dma;
1132 
1133 	sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
1134 	if (IS_ERR(sdd->regs)) {
1135 		ret = PTR_ERR(sdd->regs);
1136 		goto err0;
1137 	}
1138 
1139 	if (sci->cfg_gpio && sci->cfg_gpio()) {
1140 		dev_err(&pdev->dev, "Unable to config gpio\n");
1141 		ret = -EBUSY;
1142 		goto err0;
1143 	}
1144 
1145 	/* Setup clocks */
1146 	sdd->clk = devm_clk_get(&pdev->dev, "spi");
1147 	if (IS_ERR(sdd->clk)) {
1148 		dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
1149 		ret = PTR_ERR(sdd->clk);
1150 		goto err0;
1151 	}
1152 
1153 	if (clk_prepare_enable(sdd->clk)) {
1154 		dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
1155 		ret = -EBUSY;
1156 		goto err0;
1157 	}
1158 
1159 	sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
1160 	sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
1161 	if (IS_ERR(sdd->src_clk)) {
1162 		dev_err(&pdev->dev,
1163 			"Unable to acquire clock '%s'\n", clk_name);
1164 		ret = PTR_ERR(sdd->src_clk);
1165 		goto err2;
1166 	}
1167 
1168 	if (clk_prepare_enable(sdd->src_clk)) {
1169 		dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
1170 		ret = -EBUSY;
1171 		goto err2;
1172 	}
1173 
1174 	/* Set up default mode */
1175 	s3c64xx_spi_hwinit(sdd, sdd->port_id);
1176 
1177 	spin_lock_init(&sdd->lock);
1178 	init_completion(&sdd->xfer_completion);
1179 
1180 	ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
1181 				"spi-s3c64xx", sdd);
1182 	if (ret != 0) {
1183 		dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
1184 			irq, ret);
1185 		goto err3;
1186 	}
1187 
1188 	writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
1189 	       S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
1190 	       sdd->regs + S3C64XX_SPI_INT_EN);
1191 
1192 	pm_runtime_set_active(&pdev->dev);
1193 	pm_runtime_enable(&pdev->dev);
1194 
1195 	ret = devm_spi_register_master(&pdev->dev, master);
1196 	if (ret != 0) {
1197 		dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
1198 		goto err3;
1199 	}
1200 
1201 	dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
1202 					sdd->port_id, master->num_chipselect);
1203 	dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tDMA=[Rx-%d, Tx-%d]\n",
1204 					mem_res,
1205 					sdd->rx_dma.dmach, sdd->tx_dma.dmach);
1206 
1207 	return 0;
1208 
1209 err3:
1210 	clk_disable_unprepare(sdd->src_clk);
1211 err2:
1212 	clk_disable_unprepare(sdd->clk);
1213 err0:
1214 	spi_master_put(master);
1215 
1216 	return ret;
1217 }
1218 
1219 static int s3c64xx_spi_remove(struct platform_device *pdev)
1220 {
1221 	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1222 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1223 
1224 	pm_runtime_disable(&pdev->dev);
1225 
1226 	writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
1227 
1228 	clk_disable_unprepare(sdd->src_clk);
1229 
1230 	clk_disable_unprepare(sdd->clk);
1231 
1232 	return 0;
1233 }
1234 
1235 #ifdef CONFIG_PM_SLEEP
1236 static int s3c64xx_spi_suspend(struct device *dev)
1237 {
1238 	struct spi_master *master = dev_get_drvdata(dev);
1239 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1240 
1241 	int ret = spi_master_suspend(master);
1242 	if (ret)
1243 		return ret;
1244 
1245 	if (!pm_runtime_suspended(dev)) {
1246 		clk_disable_unprepare(sdd->clk);
1247 		clk_disable_unprepare(sdd->src_clk);
1248 	}
1249 
1250 	sdd->cur_speed = 0; /* Output Clock is stopped */
1251 
1252 	return 0;
1253 }
1254 
1255 static int s3c64xx_spi_resume(struct device *dev)
1256 {
1257 	struct spi_master *master = dev_get_drvdata(dev);
1258 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1259 	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
1260 
1261 	if (sci->cfg_gpio)
1262 		sci->cfg_gpio();
1263 
1264 	if (!pm_runtime_suspended(dev)) {
1265 		clk_prepare_enable(sdd->src_clk);
1266 		clk_prepare_enable(sdd->clk);
1267 	}
1268 
1269 	s3c64xx_spi_hwinit(sdd, sdd->port_id);
1270 
1271 	return spi_master_resume(master);
1272 }
1273 #endif /* CONFIG_PM_SLEEP */
1274 
1275 #ifdef CONFIG_PM_RUNTIME
1276 static int s3c64xx_spi_runtime_suspend(struct device *dev)
1277 {
1278 	struct spi_master *master = dev_get_drvdata(dev);
1279 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1280 
1281 	clk_disable_unprepare(sdd->clk);
1282 	clk_disable_unprepare(sdd->src_clk);
1283 
1284 	return 0;
1285 }
1286 
1287 static int s3c64xx_spi_runtime_resume(struct device *dev)
1288 {
1289 	struct spi_master *master = dev_get_drvdata(dev);
1290 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1291 	int ret;
1292 
1293 	ret = clk_prepare_enable(sdd->src_clk);
1294 	if (ret != 0)
1295 		return ret;
1296 
1297 	ret = clk_prepare_enable(sdd->clk);
1298 	if (ret != 0) {
1299 		clk_disable_unprepare(sdd->src_clk);
1300 		return ret;
1301 	}
1302 
1303 	return 0;
1304 }
1305 #endif /* CONFIG_PM_RUNTIME */
1306 
1307 static const struct dev_pm_ops s3c64xx_spi_pm = {
1308 	SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
1309 	SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
1310 			   s3c64xx_spi_runtime_resume, NULL)
1311 };
1312 
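/*
 * Per-SoC port configurations. fifo_lvl_mask is indexed by port_id; a mask of
 * 0x7f corresponds to a 64-word FIFO and 0x1ff to a 256-word FIFO, since the
 * FIFO macros above treat (mask >> 1) + 1 as the usable depth.
 */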
1313 static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
1314 	.fifo_lvl_mask	= { 0x7f },
1315 	.rx_lvl_offset	= 13,
1316 	.tx_st_done	= 21,
1317 	.high_speed	= true,
1318 };
1319 
1320 static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
1321 	.fifo_lvl_mask	= { 0x7f, 0x7F },
1322 	.rx_lvl_offset	= 13,
1323 	.tx_st_done	= 21,
1324 };
1325 
1326 static struct s3c64xx_spi_port_config s5p64x0_spi_port_config = {
1327 	.fifo_lvl_mask	= { 0x1ff, 0x7F },
1328 	.rx_lvl_offset	= 15,
1329 	.tx_st_done	= 25,
1330 };
1331 
1332 static struct s3c64xx_spi_port_config s5pc100_spi_port_config = {
1333 	.fifo_lvl_mask	= { 0x7f, 0x7F },
1334 	.rx_lvl_offset	= 13,
1335 	.tx_st_done	= 21,
1336 	.high_speed	= true,
1337 };
1338 
1339 static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
1340 	.fifo_lvl_mask	= { 0x1ff, 0x7F },
1341 	.rx_lvl_offset	= 15,
1342 	.tx_st_done	= 25,
1343 	.high_speed	= true,
1344 };
1345 
1346 static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
1347 	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F },
1348 	.rx_lvl_offset	= 15,
1349 	.tx_st_done	= 25,
1350 	.high_speed	= true,
1351 	.clk_from_cmu	= true,
1352 };
1353 
1354 static struct s3c64xx_spi_port_config exynos5440_spi_port_config = {
1355 	.fifo_lvl_mask	= { 0x1ff },
1356 	.rx_lvl_offset	= 15,
1357 	.tx_st_done	= 25,
1358 	.high_speed	= true,
1359 	.clk_from_cmu	= true,
1360 	.quirks		= S3C64XX_SPI_QUIRK_POLL,
1361 };
1362 
1363 static struct platform_device_id s3c64xx_spi_driver_ids[] = {
1364 	{
1365 		.name		= "s3c2443-spi",
1366 		.driver_data	= (kernel_ulong_t)&s3c2443_spi_port_config,
1367 	}, {
1368 		.name		= "s3c6410-spi",
1369 		.driver_data	= (kernel_ulong_t)&s3c6410_spi_port_config,
1370 	}, {
1371 		.name		= "s5p64x0-spi",
1372 		.driver_data	= (kernel_ulong_t)&s5p64x0_spi_port_config,
1373 	}, {
1374 		.name		= "s5pc100-spi",
1375 		.driver_data	= (kernel_ulong_t)&s5pc100_spi_port_config,
1376 	}, {
1377 		.name		= "s5pv210-spi",
1378 		.driver_data	= (kernel_ulong_t)&s5pv210_spi_port_config,
1379 	}, {
1380 		.name		= "exynos4210-spi",
1381 		.driver_data	= (kernel_ulong_t)&exynos4_spi_port_config,
1382 	},
1383 	{ },
1384 };
1385 
1386 static const struct of_device_id s3c64xx_spi_dt_match[] = {
1387 	{ .compatible = "samsung,s3c2443-spi",
1388 			.data = (void *)&s3c2443_spi_port_config,
1389 	},
1390 	{ .compatible = "samsung,s3c6410-spi",
1391 			.data = (void *)&s3c6410_spi_port_config,
1392 	},
1393 	{ .compatible = "samsung,s5pc100-spi",
1394 			.data = (void *)&s5pc100_spi_port_config,
1395 	},
1396 	{ .compatible = "samsung,s5pv210-spi",
1397 			.data = (void *)&s5pv210_spi_port_config,
1398 	},
1399 	{ .compatible = "samsung,exynos4210-spi",
1400 			.data = (void *)&exynos4_spi_port_config,
1401 	},
1402 	{ .compatible = "samsung,exynos5440-spi",
1403 			.data = (void *)&exynos5440_spi_port_config,
1404 	},
1405 	{ },
1406 };
1407 MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
1408 
1409 static struct platform_driver s3c64xx_spi_driver = {
1410 	.driver = {
1411 		.name	= "s3c64xx-spi",
1412 		.owner = THIS_MODULE,
1413 		.pm = &s3c64xx_spi_pm,
1414 		.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
1415 	},
1416 	.probe = s3c64xx_spi_probe,
1417 	.remove = s3c64xx_spi_remove,
1418 	.id_table = s3c64xx_spi_driver_ids,
1419 };
1420 MODULE_ALIAS("platform:s3c64xx-spi");
1421 
1422 module_platform_driver(s3c64xx_spi_driver);
1423 
1424 MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
1425 MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
1426 MODULE_LICENSE("GPL");
1427