/*
 * SPI driver for Nvidia's Tegra20/Tegra30 SLINK Controller.
 *
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-tegra.h>
#include <mach/clk.h>

#define SLINK_COMMAND			0x000
#define SLINK_BIT_LENGTH(x)		(((x) & 0x1f) << 0)
#define SLINK_WORD_SIZE(x)		(((x) & 0x1f) << 5)
#define SLINK_BOTH_EN			(1 << 10)
#define SLINK_CS_SW			(1 << 11)
#define SLINK_CS_VALUE			(1 << 12)
#define SLINK_CS_POLARITY		(1 << 13)
#define SLINK_IDLE_SDA_DRIVE_LOW	(0 << 16)
#define SLINK_IDLE_SDA_DRIVE_HIGH	(1 << 16)
#define SLINK_IDLE_SDA_PULL_LOW		(2 << 16)
#define SLINK_IDLE_SDA_PULL_HIGH	(3 << 16)
#define SLINK_IDLE_SDA_MASK		(3 << 16)
#define SLINK_CS_POLARITY1		(1 << 20)
#define SLINK_CK_SDA			(1 << 21)
#define SLINK_CS_POLARITY2		(1 << 22)
#define SLINK_CS_POLARITY3		(1 << 23)
#define SLINK_IDLE_SCLK_DRIVE_LOW	(0 << 24)
#define SLINK_IDLE_SCLK_DRIVE_HIGH	(1 << 24)
#define SLINK_IDLE_SCLK_PULL_LOW	(2 << 24)
#define SLINK_IDLE_SCLK_PULL_HIGH	(3 << 24)
#define SLINK_IDLE_SCLK_MASK		(3 << 24)
#define SLINK_M_S			(1 << 28)
#define SLINK_WAIT			(1 << 29)
#define SLINK_GO			(1 << 30)
#define SLINK_ENB			(1 << 31)

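/* Clock phase/polarity bits that are cleared and re-programmed per transfer */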
#define SLINK_MODES			(SLINK_IDLE_SCLK_MASK | SLINK_CK_SDA)

#define SLINK_COMMAND2			0x004
#define SLINK_LSBFE			(1 << 0)
#define SLINK_SSOE			(1 << 1)
#define SLINK_SPIE			(1 << 4)
#define SLINK_BIDIROE			(1 << 6)
#define SLINK_MODFEN			(1 << 7)
#define SLINK_INT_SIZE(x)		(((x) & 0x1f) << 8)
#define SLINK_CS_ACTIVE_BETWEEN		(1 << 17)
#define SLINK_SS_EN_CS(x)		(((x) & 0x3) << 18)
#define SLINK_SS_SETUP(x)		(((x) & 0x3) << 20)
#define SLINK_FIFO_REFILLS_0		(0 << 22)
#define SLINK_FIFO_REFILLS_1		(1 << 22)
#define SLINK_FIFO_REFILLS_2		(2 << 22)
#define SLINK_FIFO_REFILLS_3		(3 << 22)
#define SLINK_FIFO_REFILLS_MASK		(3 << 22)
#define SLINK_WAIT_PACK_INT(x)		(((x) & 0x7) << 26)
#define SLINK_SPC0			(1 << 29)
#define SLINK_TXEN			(1 << 30)
#define SLINK_RXEN			(1 << 31)

#define SLINK_STATUS			0x008
#define SLINK_COUNT(val)		(((val) >> 0) & 0x1f)
#define SLINK_WORD(val)			(((val) >> 5) & 0x1f)
#define SLINK_BLK_CNT(val)		(((val) >> 0) & 0xffff)
#define SLINK_MODF			(1 << 16)
#define SLINK_RX_UNF			(1 << 18)
#define SLINK_TX_OVF			(1 << 19)
#define SLINK_TX_FULL			(1 << 20)
#define SLINK_TX_EMPTY			(1 << 21)
#define SLINK_RX_FULL			(1 << 22)
#define SLINK_RX_EMPTY			(1 << 23)
#define SLINK_TX_UNF			(1 << 24)
#define SLINK_RX_OVF			(1 << 25)
#define SLINK_TX_FLUSH			(1 << 26)
#define SLINK_RX_FLUSH			(1 << 27)
#define SLINK_SCLK			(1 << 28)
#define SLINK_ERR			(1 << 29)
#define SLINK_RDY			(1 << 30)
#define SLINK_BSY			(1 << 31)
#define SLINK_FIFO_ERROR		(SLINK_TX_OVF | SLINK_RX_UNF |	\
					SLINK_TX_UNF | SLINK_RX_OVF)

#define SLINK_FIFO_EMPTY		(SLINK_TX_EMPTY | SLINK_RX_EMPTY)

#define SLINK_MAS_DATA			0x010
#define SLINK_SLAVE_DATA		0x014

#define SLINK_DMA_CTL			0x018
#define SLINK_DMA_BLOCK_SIZE(x)		(((x) & 0xffff) << 0)
#define SLINK_TX_TRIG_1			(0 << 16)
#define SLINK_TX_TRIG_4			(1 << 16)
#define SLINK_TX_TRIG_8			(2 << 16)
#define SLINK_TX_TRIG_16		(3 << 16)
#define SLINK_TX_TRIG_MASK		(3 << 16)
#define SLINK_RX_TRIG_1			(0 << 18)
#define SLINK_RX_TRIG_4			(1 << 18)
#define SLINK_RX_TRIG_8			(2 << 18)
#define SLINK_RX_TRIG_16		(3 << 18)
#define SLINK_RX_TRIG_MASK		(3 << 18)
#define SLINK_PACKED			(1 << 20)
#define SLINK_PACK_SIZE_4		(0 << 21)
#define SLINK_PACK_SIZE_8		(1 << 21)
#define SLINK_PACK_SIZE_16		(2 << 21)
#define SLINK_PACK_SIZE_32		(3 << 21)
#define SLINK_PACK_SIZE_MASK		(3 << 21)
#define SLINK_IE_TXC			(1 << 26)
#define SLINK_IE_RXC			(1 << 27)
#define SLINK_DMA_EN			(1 << 31)

#define SLINK_STATUS2			0x01c
#define SLINK_TX_FIFO_EMPTY_COUNT(val)	(((val) & 0x3f) >> 0)
#define SLINK_RX_FIFO_FULL_COUNT(val)	(((val) & 0x3f0000) >> 16)
#define SLINK_SS_HOLD_TIME(val)		(((val) & 0xF) << 6)

#define SLINK_TX_FIFO			0x100
#define SLINK_RX_FIFO			0x180

#define DATA_DIR_TX			(1 << 0)
#define DATA_DIR_RX			(1 << 1)

#define SLINK_DMA_TIMEOUT		(msecs_to_jiffies(1000))

#define DEFAULT_SPI_DMA_BUF_LEN		(16*1024)
#define TX_FIFO_EMPTY_COUNT_MAX		SLINK_TX_FIFO_EMPTY_COUNT(0x20)
#define RX_FIFO_FULL_COUNT_ZERO		SLINK_RX_FIFO_FULL_COUNT(0)

#define SLINK_STATUS2_RESET \
	(TX_FIFO_EMPTY_COUNT_MAX | RX_FIFO_FULL_COUNT_ZERO << 16)

#define MAX_CHIP_SELECT			4
#define SLINK_FIFO_DEPTH		32

struct tegra_slink_chip_data {
	bool cs_hold_time;
};

struct tegra_slink_data {
	struct device				*dev;
	struct spi_master			*master;
	const struct tegra_slink_chip_data	*chip_data;
	spinlock_t				lock;

	struct clk				*clk;
	void __iomem				*base;
	phys_addr_t				phys;
	unsigned				irq;
	int					dma_req_sel;
	u32					spi_max_frequency;
	u32					cur_speed;

	struct spi_device			*cur_spi;
	unsigned				cur_pos;
	unsigned				cur_len;
	unsigned				words_per_32bit;
	unsigned				bytes_per_word;
	unsigned				curr_dma_words;
	unsigned				cur_direction;

	unsigned				cur_rx_pos;
	unsigned				cur_tx_pos;

	unsigned				dma_buf_size;
	unsigned				max_buf_size;
	bool					is_curr_dma_xfer;
	bool					is_hw_based_cs;

	struct completion			rx_dma_complete;
	struct completion			tx_dma_complete;

	u32					tx_status;
	u32					rx_status;
	u32					status_reg;
	bool					is_packed;
	unsigned long				packed_size;

	u32					command_reg;
	u32					command2_reg;
	u32					dma_control_reg;
	u32					def_command_reg;
	u32					def_command2_reg;

	struct completion			xfer_completion;
	struct spi_transfer			*curr_xfer;
	struct dma_chan				*rx_dma_chan;
	u32					*rx_dma_buf;
	dma_addr_t				rx_dma_phys;
	struct dma_async_tx_descriptor		*rx_dma_desc;

	struct dma_chan				*tx_dma_chan;
	u32					*tx_dma_buf;
	dma_addr_t				tx_dma_phys;
	struct dma_async_tx_descriptor		*tx_dma_desc;
};

static int tegra_slink_runtime_suspend(struct device *dev);
static int tegra_slink_runtime_resume(struct device *dev);

static inline unsigned long tegra_slink_readl(struct tegra_slink_data *tspi,
		unsigned long reg)
{
	return readl(tspi->base + reg);
}

static inline void tegra_slink_writel(struct tegra_slink_data *tspi,
		unsigned long val, unsigned long reg)
{
	writel(val, tspi->base + reg);

	/* Read back a register to make sure that the register write completed */
	if (reg != SLINK_TX_FIFO)
		readl(tspi->base + SLINK_MAS_DATA);
}

static void tegra_slink_clear_status(struct tegra_slink_data *tspi)
{
	unsigned long val;
	unsigned long val_write = 0;

	val = tegra_slink_readl(tspi, SLINK_STATUS);

	/* Write 1 to clear status register */
	val_write = SLINK_RDY | SLINK_FIFO_ERROR;
	tegra_slink_writel(tspi, val_write, SLINK_STATUS);
}

static unsigned long tegra_slink_get_packed_size(struct tegra_slink_data *tspi,
				  struct spi_transfer *t)
{
	unsigned long val;

	switch (tspi->bytes_per_word) {
	case 0:
		val = SLINK_PACK_SIZE_4;
		break;
	case 1:
		val = SLINK_PACK_SIZE_8;
		break;
	case 2:
		val = SLINK_PACK_SIZE_16;
		break;
	case 4:
		val = SLINK_PACK_SIZE_32;
		break;
	default:
		val = 0;
	}
	return val;
}

static unsigned tegra_slink_calculate_curr_xfer_param(
	struct spi_device *spi, struct tegra_slink_data *tspi,
	struct spi_transfer *t)
{
	unsigned remain_len = t->len - tspi->cur_pos;
	unsigned max_word;
	unsigned bits_per_word;
	unsigned max_len;
	unsigned total_fifo_words;

	bits_per_word = t->bits_per_word ? t->bits_per_word :
						spi->bits_per_word;
	tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;

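	/*
	 * Packed mode packs several SPI words into each 32-bit FIFO entry
	 * and is only used for 8-bit and 16-bit words; otherwise each FIFO
	 * entry carries a single word.
	 */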
	if (bits_per_word == 8 || bits_per_word == 16) {
		tspi->is_packed = 1;
		tspi->words_per_32bit = 32/bits_per_word;
	} else {
		tspi->is_packed = 0;
		tspi->words_per_32bit = 1;
	}
	tspi->packed_size = tegra_slink_get_packed_size(tspi, t);

	if (tspi->is_packed) {
		max_len = min(remain_len, tspi->max_buf_size);
		tspi->curr_dma_words = max_len/tspi->bytes_per_word;
		total_fifo_words = max_len/4;
	} else {
		max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
		max_word = min(max_word, tspi->max_buf_size/4);
		tspi->curr_dma_words = max_word;
		total_fifo_words = max_word;
	}
	return total_fifo_words;
}

static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf(
	struct tegra_slink_data *tspi, struct spi_transfer *t)
{
	unsigned nbytes;
	unsigned tx_empty_count;
	unsigned long fifo_status;
	unsigned max_n_32bit;
	unsigned i, count;
	unsigned long x;
	unsigned int written_words;
	unsigned fifo_words_left;
	u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;

	fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2);
	tx_empty_count = SLINK_TX_FIFO_EMPTY_COUNT(fifo_status);

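	/*
	 * Bytes are packed little-endian into 32-bit FIFO writes in packed
	 * mode; in unpacked mode each FIFO write carries one SPI word.
	 */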
	if (tspi->is_packed) {
		fifo_words_left = tx_empty_count * tspi->words_per_32bit;
		written_words = min(fifo_words_left, tspi->curr_dma_words);
		nbytes = written_words * tspi->bytes_per_word;
		max_n_32bit = DIV_ROUND_UP(nbytes, 4);
		for (count = 0; count < max_n_32bit; count++) {
			x = 0;
			for (i = 0; (i < 4) && nbytes; i++, nbytes--)
				x |= (*tx_buf++) << (i*8);
			tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
		}
	} else {
		max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
		written_words = max_n_32bit;
		nbytes = written_words * tspi->bytes_per_word;
		for (count = 0; count < max_n_32bit; count++) {
			x = 0;
			for (i = 0; nbytes && (i < tspi->bytes_per_word);
							i++, nbytes--)
				x |= ((*tx_buf++) << i*8);
			tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
		}
	}
	tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
	return written_words;
}

static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
		struct tegra_slink_data *tspi, struct spi_transfer *t)
{
	unsigned rx_full_count;
	unsigned long fifo_status;
	unsigned i, count;
	unsigned long x;
	unsigned int read_words = 0;
	unsigned len;
	u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;

	fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2);
	rx_full_count = SLINK_RX_FIFO_FULL_COUNT(fifo_status);
	if (tspi->is_packed) {
		len = tspi->curr_dma_words * tspi->bytes_per_word;
		for (count = 0; count < rx_full_count; count++) {
			x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
			for (i = 0; len && (i < 4); i++, len--)
				*rx_buf++ = (x >> i*8) & 0xFF;
		}
		tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
		read_words += tspi->curr_dma_words;
	} else {
		unsigned int bits_per_word;

		bits_per_word = t->bits_per_word ? t->bits_per_word :
						tspi->cur_spi->bits_per_word;
		for (count = 0; count < rx_full_count; count++) {
			x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
			for (i = 0; (i < tspi->bytes_per_word); i++)
				*rx_buf++ = (x >> (i*8)) & 0xFF;
		}
		tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
		read_words += rx_full_count;
	}
	return read_words;
}

static void tegra_slink_copy_client_txbuf_to_spi_txbuf(
		struct tegra_slink_data *tspi, struct spi_transfer *t)
{
	unsigned len;

	/* Make the DMA buffer accessible to the CPU */
	dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
				tspi->dma_buf_size, DMA_TO_DEVICE);

	if (tspi->is_packed) {
		len = tspi->curr_dma_words * tspi->bytes_per_word;
		memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
	} else {
		unsigned int i;
		unsigned int count;
		u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
		unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
		unsigned int x;

		for (count = 0; count < tspi->curr_dma_words; count++) {
			x = 0;
			for (i = 0; consume && (i < tspi->bytes_per_word);
							i++, consume--)
				x |= ((*tx_buf++) << i * 8);
			tspi->tx_dma_buf[count] = x;
		}
	}
	tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;

	/* Hand the DMA buffer back to the DMA engine */
	dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
				tspi->dma_buf_size, DMA_TO_DEVICE);
}

static void tegra_slink_copy_spi_rxbuf_to_client_rxbuf(
		struct tegra_slink_data *tspi, struct spi_transfer *t)
{
	unsigned len;

	/* Make the DMA buffer accessible to the CPU */
	dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
		tspi->dma_buf_size, DMA_FROM_DEVICE);

	if (tspi->is_packed) {
		len = tspi->curr_dma_words * tspi->bytes_per_word;
		memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
	} else {
		unsigned int i;
		unsigned int count;
		unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
		unsigned int x;
		unsigned int rx_mask, bits_per_word;

		bits_per_word = t->bits_per_word ? t->bits_per_word :
						tspi->cur_spi->bits_per_word;
		rx_mask = (1 << bits_per_word) - 1;
		for (count = 0; count < tspi->curr_dma_words; count++) {
			x = tspi->rx_dma_buf[count];
			x &= rx_mask;
			for (i = 0; (i < tspi->bytes_per_word); i++)
				*rx_buf++ = (x >> (i*8)) & 0xFF;
		}
	}
	tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;

	/* Hand the DMA buffer back to the DMA engine */
	dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
		tspi->dma_buf_size, DMA_FROM_DEVICE);
}

static void tegra_slink_dma_complete(void *args)
{
	struct completion *dma_complete = args;

	complete(dma_complete);
}

static int tegra_slink_start_tx_dma(struct tegra_slink_data *tspi, int len)
{
	INIT_COMPLETION(tspi->tx_dma_complete);
	tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
				tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tspi->tx_dma_desc) {
		dev_err(tspi->dev, "Not able to get desc for Tx\n");
		return -EIO;
	}

	tspi->tx_dma_desc->callback = tegra_slink_dma_complete;
	tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;

	dmaengine_submit(tspi->tx_dma_desc);
	dma_async_issue_pending(tspi->tx_dma_chan);
	return 0;
}

static int tegra_slink_start_rx_dma(struct tegra_slink_data *tspi, int len)
{
	INIT_COMPLETION(tspi->rx_dma_complete);
	tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
				tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tspi->rx_dma_desc) {
		dev_err(tspi->dev, "Not able to get desc for Rx\n");
		return -EIO;
	}

	tspi->rx_dma_desc->callback = tegra_slink_dma_complete;
	tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;

	dmaengine_submit(tspi->rx_dma_desc);
	dma_async_issue_pending(tspi->rx_dma_chan);
	return 0;
}

static int tegra_slink_start_dma_based_transfer(
		struct tegra_slink_data *tspi, struct spi_transfer *t)
{
	unsigned long val;
	unsigned long test_val;
	unsigned int len;
	int ret = 0;
	unsigned long status;

	/* Make sure that Rx and Tx fifo are empty */
	status = tegra_slink_readl(tspi, SLINK_STATUS);
	if ((status & SLINK_FIFO_EMPTY) != SLINK_FIFO_EMPTY) {
		dev_err(tspi->dev,
			"Rx/Tx fifo are not empty status 0x%08lx\n", status);
		return -EIO;
	}

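	/* DMA block size is programmed in words, minus one */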
	val = SLINK_DMA_BLOCK_SIZE(tspi->curr_dma_words - 1);
	val |= tspi->packed_size;
	if (tspi->is_packed)
		len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
					4) * 4;
	else
		len = tspi->curr_dma_words * 4;

	/* Set attention level based on length of transfer */
	if (len & 0xF)
		val |= SLINK_TX_TRIG_1 | SLINK_RX_TRIG_1;
	else if (((len) >> 4) & 0x1)
		val |= SLINK_TX_TRIG_4 | SLINK_RX_TRIG_4;
	else
		val |= SLINK_TX_TRIG_8 | SLINK_RX_TRIG_8;

	if (tspi->cur_direction & DATA_DIR_TX)
		val |= SLINK_IE_TXC;

	if (tspi->cur_direction & DATA_DIR_RX)
		val |= SLINK_IE_RXC;

	tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
	tspi->dma_control_reg = val;

	if (tspi->cur_direction & DATA_DIR_TX) {
		tegra_slink_copy_client_txbuf_to_spi_txbuf(tspi, t);
		wmb();
		ret = tegra_slink_start_tx_dma(tspi, len);
		if (ret < 0) {
			dev_err(tspi->dev,
				"Starting tx dma failed, err %d\n", ret);
			return ret;
		}

		/* Wait for the tx fifo to fill before starting slink */
		test_val = tegra_slink_readl(tspi, SLINK_STATUS);
		while (!(test_val & SLINK_TX_FULL))
			test_val = tegra_slink_readl(tspi, SLINK_STATUS);
	}

	if (tspi->cur_direction & DATA_DIR_RX) {
		/* Hand the DMA buffer over to the DMA engine */
		dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
				tspi->dma_buf_size, DMA_FROM_DEVICE);

		ret = tegra_slink_start_rx_dma(tspi, len);
		if (ret < 0) {
			dev_err(tspi->dev,
				"Starting rx dma failed, err %d\n", ret);
			if (tspi->cur_direction & DATA_DIR_TX)
				dmaengine_terminate_all(tspi->tx_dma_chan);
			return ret;
		}
	}
	tspi->is_curr_dma_xfer = true;
	if (tspi->is_packed) {
		val |= SLINK_PACKED;
		tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
		/* HW needs a small delay after setting packed mode */
		udelay(1);
	}
	tspi->dma_control_reg = val;

	val |= SLINK_DMA_EN;
	tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
	return ret;
}

static int tegra_slink_start_cpu_based_transfer(
		struct tegra_slink_data *tspi, struct spi_transfer *t)
{
	unsigned long val;
	unsigned cur_words;

	val = tspi->packed_size;
	if (tspi->cur_direction & DATA_DIR_TX)
		val |= SLINK_IE_TXC;

	if (tspi->cur_direction & DATA_DIR_RX)
		val |= SLINK_IE_RXC;

	tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
	tspi->dma_control_reg = val;

	if (tspi->cur_direction & DATA_DIR_TX)
		cur_words = tegra_slink_fill_tx_fifo_from_client_txbuf(tspi, t);
	else
		cur_words = tspi->curr_dma_words;
	val |= SLINK_DMA_BLOCK_SIZE(cur_words - 1);
	tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
	tspi->dma_control_reg = val;

	tspi->is_curr_dma_xfer = false;
	if (tspi->is_packed) {
		val |= SLINK_PACKED;
		tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
		udelay(1);
		wmb();
	}
	tspi->dma_control_reg = val;
	val |= SLINK_DMA_EN;
	tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
	return 0;
}

static int tegra_slink_init_dma_param(struct tegra_slink_data *tspi,
			bool dma_to_memory)
{
	struct dma_chan *dma_chan;
	u32 *dma_buf;
	dma_addr_t dma_phys;
	int ret;
	struct dma_slave_config dma_sconfig;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!dma_chan) {
		dev_err(tspi->dev,
			"Dma channel is not available, will try later\n");
		return -EPROBE_DEFER;
	}

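	/* Allocate a coherent bounce buffer that is reused for every transfer */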
	dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
				&dma_phys, GFP_KERNEL);
	if (!dma_buf) {
		dev_err(tspi->dev, "Not able to allocate the dma buffer\n");
		dma_release_channel(dma_chan);
		return -ENOMEM;
	}

	dma_sconfig.slave_id = tspi->dma_req_sel;
	if (dma_to_memory) {
		dma_sconfig.src_addr = tspi->phys + SLINK_RX_FIFO;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_sconfig.src_maxburst = 0;
	} else {
		dma_sconfig.dst_addr = tspi->phys + SLINK_TX_FIFO;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_sconfig.dst_maxburst = 0;
	}

	ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
	if (ret)
		goto scrub;
	if (dma_to_memory) {
		tspi->rx_dma_chan = dma_chan;
		tspi->rx_dma_buf = dma_buf;
		tspi->rx_dma_phys = dma_phys;
	} else {
		tspi->tx_dma_chan = dma_chan;
		tspi->tx_dma_buf = dma_buf;
		tspi->tx_dma_phys = dma_phys;
	}
	return 0;

scrub:
	dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
	dma_release_channel(dma_chan);
	return ret;
}

static void tegra_slink_deinit_dma_param(struct tegra_slink_data *tspi,
	bool dma_to_memory)
{
	u32 *dma_buf;
	dma_addr_t dma_phys;
	struct dma_chan *dma_chan;

	if (dma_to_memory) {
		dma_buf = tspi->rx_dma_buf;
		dma_chan = tspi->rx_dma_chan;
		dma_phys = tspi->rx_dma_phys;
		tspi->rx_dma_chan = NULL;
		tspi->rx_dma_buf = NULL;
	} else {
		dma_buf = tspi->tx_dma_buf;
		dma_chan = tspi->tx_dma_chan;
		dma_phys = tspi->tx_dma_phys;
		tspi->tx_dma_buf = NULL;
		tspi->tx_dma_chan = NULL;
	}
	if (!dma_chan)
		return;

	dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
	dma_release_channel(dma_chan);
}

static int tegra_slink_start_transfer_one(struct spi_device *spi,
		struct spi_transfer *t, bool is_first_of_msg,
		bool is_single_xfer)
{
	struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
	u32 speed;
	u8 bits_per_word;
	unsigned total_fifo_words;
	int ret;
	struct tegra_spi_device_controller_data *cdata = spi->controller_data;
	unsigned long command;
	unsigned long command2;

	bits_per_word = t->bits_per_word;
	speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
	if (!speed)
		speed = tspi->spi_max_frequency;
	if (speed != tspi->cur_speed) {
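		/* Run the controller source clock at four times the SPI clock */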
		clk_set_rate(tspi->clk, speed * 4);
		tspi->cur_speed = speed;
	}

	tspi->cur_spi = spi;
	tspi->cur_pos = 0;
	tspi->cur_rx_pos = 0;
	tspi->cur_tx_pos = 0;
	tspi->curr_xfer = t;
	total_fifo_words = tegra_slink_calculate_curr_xfer_param(spi, tspi, t);

	if (is_first_of_msg) {
		tegra_slink_clear_status(tspi);

		command = tspi->def_command_reg;
		command |= SLINK_BIT_LENGTH(bits_per_word - 1);

		command2 = tspi->def_command2_reg;
		command2 |= SLINK_SS_EN_CS(spi->chip_select);

		/* possibly use the hw based chip select */
		tspi->is_hw_based_cs = false;
		if (cdata && cdata->is_hw_based_cs && is_single_xfer &&
			((tspi->curr_dma_words * tspi->bytes_per_word) ==
						(t->len - tspi->cur_pos))) {
			int setup_count;
			int sts2;

			setup_count = cdata->cs_setup_clk_count >> 1;
			setup_count = max(setup_count, 3);
			command2 |= SLINK_SS_SETUP(setup_count);
			if (tspi->chip_data->cs_hold_time) {
				int hold_count;

				hold_count = cdata->cs_hold_clk_count;
				hold_count = max(hold_count, 0xF);
				sts2 = tegra_slink_readl(tspi, SLINK_STATUS2);
				sts2 &= ~SLINK_SS_HOLD_TIME(0xF);
				sts2 |= SLINK_SS_HOLD_TIME(hold_count);
				tegra_slink_writel(tspi, sts2, SLINK_STATUS2);
			}
			tspi->is_hw_based_cs = true;
		}

		if (tspi->is_hw_based_cs)
			command &= ~SLINK_CS_SW;
		else
			command |= SLINK_CS_SW | SLINK_CS_VALUE;

		command &= ~SLINK_MODES;
		if (spi->mode & SPI_CPHA)
			command |= SLINK_CK_SDA;

		if (spi->mode & SPI_CPOL)
			command |= SLINK_IDLE_SCLK_DRIVE_HIGH;
		else
			command |= SLINK_IDLE_SCLK_DRIVE_LOW;
	} else {
		command = tspi->command_reg;
		command &= ~SLINK_BIT_LENGTH(~0);
		command |= SLINK_BIT_LENGTH(bits_per_word - 1);

		command2 = tspi->command2_reg;
		command2 &= ~(SLINK_RXEN | SLINK_TXEN);
	}

	tegra_slink_writel(tspi, command, SLINK_COMMAND);
	tspi->command_reg = command;

	tspi->cur_direction = 0;
	if (t->rx_buf) {
		command2 |= SLINK_RXEN;
		tspi->cur_direction |= DATA_DIR_RX;
	}
	if (t->tx_buf) {
		command2 |= SLINK_TXEN;
		tspi->cur_direction |= DATA_DIR_TX;
	}
	tegra_slink_writel(tspi, command2, SLINK_COMMAND2);
	tspi->command2_reg = command2;

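	/* Use DMA only when the transfer does not fit in the 32-word FIFO */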
	if (total_fifo_words > SLINK_FIFO_DEPTH)
		ret = tegra_slink_start_dma_based_transfer(tspi, t);
	else
		ret = tegra_slink_start_cpu_based_transfer(tspi, t);
	return ret;
}

static int tegra_slink_setup(struct spi_device *spi)
{
	struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
	unsigned long val;
	unsigned long flags;
	int ret;
	unsigned int cs_pol_bit[MAX_CHIP_SELECT] = {
			SLINK_CS_POLARITY,
			SLINK_CS_POLARITY1,
			SLINK_CS_POLARITY2,
			SLINK_CS_POLARITY3,
	};

	dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
		spi->bits_per_word,
		spi->mode & SPI_CPOL ? "" : "~",
		spi->mode & SPI_CPHA ? "" : "~",
		spi->max_speed_hz);

	BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);

	ret = pm_runtime_get_sync(tspi->dev);
	if (ret < 0) {
		dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
		return ret;
	}

	spin_lock_irqsave(&tspi->lock, flags);
	val = tspi->def_command_reg;
	if (spi->mode & SPI_CS_HIGH)
		val |= cs_pol_bit[spi->chip_select];
	else
		val &= ~cs_pol_bit[spi->chip_select];
	tspi->def_command_reg = val;
	tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
	spin_unlock_irqrestore(&tspi->lock, flags);

	pm_runtime_put(tspi->dev);
	return 0;
}

static int tegra_slink_prepare_transfer(struct spi_master *master)
{
	struct tegra_slink_data *tspi = spi_master_get_devdata(master);

	return pm_runtime_get_sync(tspi->dev);
}

static int tegra_slink_unprepare_transfer(struct spi_master *master)
{
	struct tegra_slink_data *tspi = spi_master_get_devdata(master);

	pm_runtime_put(tspi->dev);
	return 0;
}

static int tegra_slink_transfer_one_message(struct spi_master *master,
			struct spi_message *msg)
{
	bool is_first_msg = true;
	int single_xfer;
	struct tegra_slink_data *tspi = spi_master_get_devdata(master);
	struct spi_transfer *xfer;
	struct spi_device *spi = msg->spi;
	int ret;

	msg->status = 0;
	msg->actual_length = 0;
	single_xfer = list_is_singular(&msg->transfers);
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		INIT_COMPLETION(tspi->xfer_completion);
		ret = tegra_slink_start_transfer_one(spi, xfer,
					is_first_msg, single_xfer);
		if (ret < 0) {
			dev_err(tspi->dev,
				"spi can not start transfer, err %d\n", ret);
			goto exit;
		}
		is_first_msg = false;
		ret = wait_for_completion_timeout(&tspi->xfer_completion,
						SLINK_DMA_TIMEOUT);
		if (WARN_ON(ret == 0)) {
			dev_err(tspi->dev,
				"spi transfer timeout, err %d\n", ret);
			ret = -EIO;
			goto exit;
		}

		if (tspi->tx_status || tspi->rx_status) {
			dev_err(tspi->dev, "Error in Transfer\n");
			ret = -EIO;
			goto exit;
		}
		msg->actual_length += xfer->len;
		if (xfer->cs_change && xfer->delay_usecs) {
			tegra_slink_writel(tspi, tspi->def_command_reg,
					SLINK_COMMAND);
			udelay(xfer->delay_usecs);
		}
	}
	ret = 0;
exit:
	tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
	tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
	msg->status = ret;
	spi_finalize_current_message(master);
	return ret;
}

static irqreturn_t handle_cpu_based_xfer(struct tegra_slink_data *tspi)
{
	struct spi_transfer *t = tspi->curr_xfer;
	unsigned long flags;

	spin_lock_irqsave(&tspi->lock, flags);
	if (tspi->tx_status || tspi->rx_status ||
				(tspi->status_reg & SLINK_BSY)) {
		dev_err(tspi->dev,
			"CpuXfer ERROR bit set 0x%x\n", tspi->status_reg);
		dev_err(tspi->dev,
			"CpuXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
				tspi->command2_reg, tspi->dma_control_reg);
		tegra_periph_reset_assert(tspi->clk);
		udelay(2);
		tegra_periph_reset_deassert(tspi->clk);
		complete(&tspi->xfer_completion);
		goto exit;
	}

	if (tspi->cur_direction & DATA_DIR_RX)
		tegra_slink_read_rx_fifo_to_client_rxbuf(tspi, t);

	if (tspi->cur_direction & DATA_DIR_TX)
		tspi->cur_pos = tspi->cur_tx_pos;
	else
		tspi->cur_pos = tspi->cur_rx_pos;

	if (tspi->cur_pos == t->len) {
		complete(&tspi->xfer_completion);
		goto exit;
	}

	tegra_slink_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
	tegra_slink_start_cpu_based_transfer(tspi, t);
exit:
	spin_unlock_irqrestore(&tspi->lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t handle_dma_based_xfer(struct tegra_slink_data *tspi)
{
	struct spi_transfer *t = tspi->curr_xfer;
	long wait_status;
	int err = 0;
	unsigned total_fifo_words;
	unsigned long flags;

	/* Abort dmas if any error */
	if (tspi->cur_direction & DATA_DIR_TX) {
		if (tspi->tx_status) {
			dmaengine_terminate_all(tspi->tx_dma_chan);
			err += 1;
		} else {
			wait_status = wait_for_completion_interruptible_timeout(
				&tspi->tx_dma_complete, SLINK_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tspi->tx_dma_chan);
				dev_err(tspi->dev, "TxDma Xfer failed\n");
				err += 1;
			}
		}
	}

	if (tspi->cur_direction & DATA_DIR_RX) {
		if (tspi->rx_status) {
			dmaengine_terminate_all(tspi->rx_dma_chan);
			err += 2;
		} else {
			wait_status = wait_for_completion_interruptible_timeout(
				&tspi->rx_dma_complete, SLINK_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tspi->rx_dma_chan);
				dev_err(tspi->dev, "RxDma Xfer failed\n");
				err += 2;
			}
		}
	}

	spin_lock_irqsave(&tspi->lock, flags);
	if (err) {
		dev_err(tspi->dev,
			"DmaXfer: ERROR bit set 0x%x\n", tspi->status_reg);
		dev_err(tspi->dev,
			"DmaXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
				tspi->command2_reg, tspi->dma_control_reg);
		tegra_periph_reset_assert(tspi->clk);
		udelay(2);
		tegra_periph_reset_deassert(tspi->clk);
		complete(&tspi->xfer_completion);
		spin_unlock_irqrestore(&tspi->lock, flags);
		return IRQ_HANDLED;
	}

	if (tspi->cur_direction & DATA_DIR_RX)
		tegra_slink_copy_spi_rxbuf_to_client_rxbuf(tspi, t);

	if (tspi->cur_direction & DATA_DIR_TX)
		tspi->cur_pos = tspi->cur_tx_pos;
	else
		tspi->cur_pos = tspi->cur_rx_pos;

	if (tspi->cur_pos == t->len) {
		complete(&tspi->xfer_completion);
		goto exit;
	}

	/* Continue transfer in current message */
	total_fifo_words = tegra_slink_calculate_curr_xfer_param(tspi->cur_spi,
							tspi, t);
	if (total_fifo_words > SLINK_FIFO_DEPTH)
		err = tegra_slink_start_dma_based_transfer(tspi, t);
	else
		err = tegra_slink_start_cpu_based_transfer(tspi, t);

exit:
	spin_unlock_irqrestore(&tspi->lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t tegra_slink_isr_thread(int irq, void *context_data)
{
	struct tegra_slink_data *tspi = context_data;

	if (!tspi->is_curr_dma_xfer)
		return handle_cpu_based_xfer(tspi);
	return handle_dma_based_xfer(tspi);
}

static irqreturn_t tegra_slink_isr(int irq, void *context_data)
{
	struct tegra_slink_data *tspi = context_data;

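	/* Latch and clear the status here; the threaded handler finishes the transfer */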
	tspi->status_reg = tegra_slink_readl(tspi, SLINK_STATUS);
	if (tspi->cur_direction & DATA_DIR_TX)
		tspi->tx_status = tspi->status_reg &
					(SLINK_TX_OVF | SLINK_TX_UNF);

	if (tspi->cur_direction & DATA_DIR_RX)
		tspi->rx_status = tspi->status_reg &
					(SLINK_RX_OVF | SLINK_RX_UNF);
	tegra_slink_clear_status(tspi);

	return IRQ_WAKE_THREAD;
}

static struct tegra_spi_platform_data *tegra_slink_parse_dt(
		struct platform_device *pdev)
{
	struct tegra_spi_platform_data *pdata;
	const unsigned int *prop;
	struct device_node *np = pdev->dev.of_node;
	u32 of_dma[2];

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "Memory alloc for pdata failed\n");
		return NULL;
	}

	if (of_property_read_u32_array(np, "nvidia,dma-request-selector",
				of_dma, 2) >= 0)
		pdata->dma_req_sel = of_dma[1];

	prop = of_get_property(np, "spi-max-frequency", NULL);
	if (prop)
		pdata->spi_max_frequency = be32_to_cpup(prop);

	return pdata;
}

const struct tegra_slink_chip_data tegra30_spi_cdata = {
	.cs_hold_time = true,
};

const struct tegra_slink_chip_data tegra20_spi_cdata = {
	.cs_hold_time = false,
};

static struct of_device_id tegra_slink_of_match[] = {
	{ .compatible = "nvidia,tegra30-slink", .data = &tegra30_spi_cdata, },
	{ .compatible = "nvidia,tegra20-slink", .data = &tegra20_spi_cdata, },
	{}
};
MODULE_DEVICE_TABLE(of, tegra_slink_of_match);

static int tegra_slink_probe(struct platform_device *pdev)
{
	struct spi_master	*master;
	struct tegra_slink_data	*tspi;
	struct resource		*r;
	struct tegra_spi_platform_data *pdata = pdev->dev.platform_data;
	int ret, spi_irq;
	const struct tegra_slink_chip_data *cdata = NULL;
	const struct of_device_id *match;

	match = of_match_device(of_match_ptr(tegra_slink_of_match), &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}
	cdata = match->data;
	if (!pdata && pdev->dev.of_node)
		pdata = tegra_slink_parse_dt(pdev);

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data, exiting\n");
		return -ENODEV;
	}

	if (!pdata->spi_max_frequency)
		pdata->spi_max_frequency = 25000000; /* 25MHz */

	master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
	if (!master) {
		dev_err(&pdev->dev, "master allocation failed\n");
		return -ENOMEM;
	}

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->setup = tegra_slink_setup;
	master->prepare_transfer_hardware = tegra_slink_prepare_transfer;
	master->transfer_one_message = tegra_slink_transfer_one_message;
	master->unprepare_transfer_hardware = tegra_slink_unprepare_transfer;
	master->num_chipselect = MAX_CHIP_SELECT;
	master->bus_num = -1;

	dev_set_drvdata(&pdev->dev, master);
	tspi = spi_master_get_devdata(master);
	tspi->master = master;
	tspi->dma_req_sel = pdata->dma_req_sel;
	tspi->dev = &pdev->dev;
	tspi->chip_data = cdata;
	spin_lock_init(&tspi->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "No IO memory resource\n");
		ret = -ENODEV;
		goto exit_free_master;
	}
	tspi->phys = r->start;
	tspi->base = devm_request_and_ioremap(&pdev->dev, r);
	if (!tspi->base) {
		dev_err(&pdev->dev,
			"Cannot request memregion/iomap dma address\n");
		ret = -EADDRNOTAVAIL;
		goto exit_free_master;
	}

	spi_irq = platform_get_irq(pdev, 0);
	tspi->irq = spi_irq;
	ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
			tegra_slink_isr_thread, IRQF_ONESHOT,
			dev_name(&pdev->dev), tspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
					tspi->irq);
		goto exit_free_master;
	}

	tspi->clk = devm_clk_get(&pdev->dev, "slink");
	if (IS_ERR(tspi->clk)) {
		dev_err(&pdev->dev, "can not get clock\n");
		ret = PTR_ERR(tspi->clk);
		goto exit_free_irq;
	}

	tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
	tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
	tspi->spi_max_frequency = pdata->spi_max_frequency;

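	/* DMA is set up only when a DMA request selector was provided */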
	if (pdata->dma_req_sel) {
		ret = tegra_slink_init_dma_param(tspi, true);
		if (ret < 0) {
			dev_err(&pdev->dev, "RxDma Init failed, err %d\n", ret);
			goto exit_free_irq;
		}

		ret = tegra_slink_init_dma_param(tspi, false);
		if (ret < 0) {
			dev_err(&pdev->dev, "TxDma Init failed, err %d\n", ret);
			goto exit_rx_dma_free;
		}
		tspi->max_buf_size = tspi->dma_buf_size;
		init_completion(&tspi->tx_dma_complete);
		init_completion(&tspi->rx_dma_complete);
	}

	init_completion(&tspi->xfer_completion);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = tegra_slink_runtime_resume(&pdev->dev);
		if (ret)
			goto exit_pm_disable;
	}

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
		goto exit_pm_disable;
	}
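	/* Reset defaults: master mode, chip select kept active between words */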
	tspi->def_command_reg = SLINK_M_S;
	tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
	tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
	tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
	pm_runtime_put(&pdev->dev);

	master->dev.of_node = pdev->dev.of_node;
	ret = spi_register_master(master);
	if (ret < 0) {
		dev_err(&pdev->dev, "can not register master, err %d\n", ret);
		goto exit_pm_disable;
	}
	return ret;

exit_pm_disable:
	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_slink_runtime_suspend(&pdev->dev);
	tegra_slink_deinit_dma_param(tspi, false);
exit_rx_dma_free:
	tegra_slink_deinit_dma_param(tspi, true);
exit_free_irq:
	free_irq(spi_irq, tspi);
exit_free_master:
	spi_master_put(master);
	return ret;
}

static int tegra_slink_remove(struct platform_device *pdev)
{
	struct spi_master *master = dev_get_drvdata(&pdev->dev);
	struct tegra_slink_data	*tspi = spi_master_get_devdata(master);

	free_irq(tspi->irq, tspi);
	spi_unregister_master(master);

	if (tspi->tx_dma_chan)
		tegra_slink_deinit_dma_param(tspi, false);

	if (tspi->rx_dma_chan)
		tegra_slink_deinit_dma_param(tspi, true);

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_slink_runtime_suspend(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int tegra_slink_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_suspend(master);
}

static int tegra_slink_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_slink_data *tspi = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm runtime failed, e = %d\n", ret);
		return ret;
	}
	tegra_slink_writel(tspi, tspi->command_reg, SLINK_COMMAND);
	tegra_slink_writel(tspi, tspi->command2_reg, SLINK_COMMAND2);
	pm_runtime_put(dev);

	return spi_master_resume(master);
}
#endif

static int tegra_slink_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_slink_data *tspi = spi_master_get_devdata(master);

	/* Flush all writes still in the PPSB queue by reading back */
	tegra_slink_readl(tspi, SLINK_MAS_DATA);

	clk_disable_unprepare(tspi->clk);
	return 0;
}

static int tegra_slink_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_slink_data *tspi = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(tspi->clk);
	if (ret < 0) {
		dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
		return ret;
	}
	return 0;
}

static const struct dev_pm_ops slink_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_slink_runtime_suspend,
		tegra_slink_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_slink_suspend, tegra_slink_resume)
};
static struct platform_driver tegra_slink_driver = {
	.driver = {
		.name		= "spi-tegra-slink",
		.owner		= THIS_MODULE,
		.pm		= &slink_pm_ops,
		.of_match_table	= of_match_ptr(tegra_slink_of_match),
	},
	.probe =	tegra_slink_probe,
	.remove =	tegra_slink_remove,
};
module_platform_driver(tegra_slink_driver);

MODULE_ALIAS("platform:spi-tegra-slink");
MODULE_DESCRIPTION("NVIDIA Tegra20/Tegra30 SLINK Controller Driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");