xref: /openbmc/linux/drivers/spi/spi-tegra114.c (revision e481ff3f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * SPI driver for NVIDIA's Tegra114 SPI Controller.
4  *
5  * Copyright (c) 2013, NVIDIA CORPORATION.  All rights reserved.
6  */
7 
8 #include <linux/clk.h>
9 #include <linux/completion.h>
10 #include <linux/delay.h>
11 #include <linux/dmaengine.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmapool.h>
14 #include <linux/err.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/kernel.h>
18 #include <linux/kthread.h>
19 #include <linux/module.h>
20 #include <linux/platform_device.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/of.h>
23 #include <linux/of_device.h>
24 #include <linux/reset.h>
25 #include <linux/spi/spi.h>
26 
27 #define SPI_COMMAND1				0x000
28 #define SPI_BIT_LENGTH(x)			(((x) & 0x1f) << 0)
29 #define SPI_PACKED				(1 << 5)
30 #define SPI_TX_EN				(1 << 11)
31 #define SPI_RX_EN				(1 << 12)
32 #define SPI_BOTH_EN_BYTE			(1 << 13)
33 #define SPI_BOTH_EN_BIT				(1 << 14)
34 #define SPI_LSBYTE_FE				(1 << 15)
35 #define SPI_LSBIT_FE				(1 << 16)
36 #define SPI_BIDIROE				(1 << 17)
37 #define SPI_IDLE_SDA_DRIVE_LOW			(0 << 18)
38 #define SPI_IDLE_SDA_DRIVE_HIGH			(1 << 18)
39 #define SPI_IDLE_SDA_PULL_LOW			(2 << 18)
40 #define SPI_IDLE_SDA_PULL_HIGH			(3 << 18)
41 #define SPI_IDLE_SDA_MASK			(3 << 18)
42 #define SPI_CS_SW_VAL				(1 << 20)
43 #define SPI_CS_SW_HW				(1 << 21)
44 /* SPI_CS_POL_INACTIVE bits are default high */
45 						/* n from 0 to 3 */
46 #define SPI_CS_POL_INACTIVE(n)			(1 << (22 + (n)))
47 #define SPI_CS_POL_INACTIVE_MASK		(0xF << 22)
48 
49 #define SPI_CS_SEL_0				(0 << 26)
50 #define SPI_CS_SEL_1				(1 << 26)
51 #define SPI_CS_SEL_2				(2 << 26)
52 #define SPI_CS_SEL_3				(3 << 26)
53 #define SPI_CS_SEL_MASK				(3 << 26)
54 #define SPI_CS_SEL(x)				(((x) & 0x3) << 26)
55 #define SPI_CONTROL_MODE_0			(0 << 28)
56 #define SPI_CONTROL_MODE_1			(1 << 28)
57 #define SPI_CONTROL_MODE_2			(2 << 28)
58 #define SPI_CONTROL_MODE_3			(3 << 28)
59 #define SPI_CONTROL_MODE_MASK			(3 << 28)
60 #define SPI_MODE_SEL(x)				(((x) & 0x3) << 28)
61 #define SPI_M_S					(1 << 30)
62 #define SPI_PIO					(1 << 31)
63 
64 #define SPI_COMMAND2				0x004
65 #define SPI_TX_TAP_DELAY(x)			(((x) & 0x3F) << 6)
66 #define SPI_RX_TAP_DELAY(x)			(((x) & 0x3F) << 0)
67 
68 #define SPI_CS_TIMING1				0x008
69 #define SPI_SETUP_HOLD(setup, hold)		(((setup) << 4) | (hold))
70 #define SPI_CS_SETUP_HOLD(reg, cs, val)			\
71 		((((val) & 0xFFu) << ((cs) * 8)) |	\
72 		((reg) & ~(0xFFu << ((cs) * 8))))
73 
74 #define SPI_CS_TIMING2				0x00C
75 #define CYCLES_BETWEEN_PACKETS_0(x)		(((x) & 0x1F) << 0)
76 #define CS_ACTIVE_BETWEEN_PACKETS_0		(1 << 5)
77 #define CYCLES_BETWEEN_PACKETS_1(x)		(((x) & 0x1F) << 8)
78 #define CS_ACTIVE_BETWEEN_PACKETS_1		(1 << 13)
79 #define CYCLES_BETWEEN_PACKETS_2(x)		(((x) & 0x1F) << 16)
80 #define CS_ACTIVE_BETWEEN_PACKETS_2		(1 << 21)
81 #define CYCLES_BETWEEN_PACKETS_3(x)		(((x) & 0x1F) << 24)
82 #define CS_ACTIVE_BETWEEN_PACKETS_3		(1 << 29)
83 #define SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(reg, cs, val)		\
84 		(reg = (((val) & 0x1) << ((cs) * 8 + 5)) |	\
85 			((reg) & ~(1 << ((cs) * 8 + 5))))
86 #define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val)		\
87 		(reg = (((val) & 0x1F) << ((cs) * 8)) |		\
88 			((reg) & ~(0x1F << ((cs) * 8))))
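/*
 * Editorial note: the two read-modify-write macros above update one chip
 * select's byte in SPI_CS_TIMING2.  For example,
 * SPI_SET_CYCLES_BETWEEN_PACKETS(reg, 2, 5) replaces bits [20:16] of reg
 * with 5, and SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(reg, 2, 1) sets bit 21,
 * which is CS_ACTIVE_BETWEEN_PACKETS_2.
 */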
89 #define MAX_SETUP_HOLD_CYCLES			16
90 #define MAX_INACTIVE_CYCLES			32
91 
92 #define SPI_TRANS_STATUS			0x010
93 #define SPI_BLK_CNT(val)			(((val) >> 0) & 0xFFFF)
94 #define SPI_SLV_IDLE_COUNT(val)			(((val) >> 16) & 0xFF)
95 #define SPI_RDY					(1 << 30)
96 
97 #define SPI_FIFO_STATUS				0x014
98 #define SPI_RX_FIFO_EMPTY			(1 << 0)
99 #define SPI_RX_FIFO_FULL			(1 << 1)
100 #define SPI_TX_FIFO_EMPTY			(1 << 2)
101 #define SPI_TX_FIFO_FULL			(1 << 3)
102 #define SPI_RX_FIFO_UNF				(1 << 4)
103 #define SPI_RX_FIFO_OVF				(1 << 5)
104 #define SPI_TX_FIFO_UNF				(1 << 6)
105 #define SPI_TX_FIFO_OVF				(1 << 7)
106 #define SPI_ERR					(1 << 8)
107 #define SPI_TX_FIFO_FLUSH			(1 << 14)
108 #define SPI_RX_FIFO_FLUSH			(1 << 15)
109 #define SPI_TX_FIFO_EMPTY_COUNT(val)		(((val) >> 16) & 0x7F)
110 #define SPI_RX_FIFO_FULL_COUNT(val)		(((val) >> 23) & 0x7F)
111 #define SPI_FRAME_END				(1 << 30)
112 #define SPI_CS_INACTIVE				(1 << 31)
113 
114 #define SPI_FIFO_ERROR				(SPI_RX_FIFO_UNF | \
115 			SPI_RX_FIFO_OVF | SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF)
116 #define SPI_FIFO_EMPTY			(SPI_RX_FIFO_EMPTY | SPI_TX_FIFO_EMPTY)
117 
118 #define SPI_TX_DATA				0x018
119 #define SPI_RX_DATA				0x01C
120 
121 #define SPI_DMA_CTL				0x020
122 #define SPI_TX_TRIG_1				(0 << 15)
123 #define SPI_TX_TRIG_4				(1 << 15)
124 #define SPI_TX_TRIG_8				(2 << 15)
125 #define SPI_TX_TRIG_16				(3 << 15)
126 #define SPI_TX_TRIG_MASK			(3 << 15)
127 #define SPI_RX_TRIG_1				(0 << 19)
128 #define SPI_RX_TRIG_4				(1 << 19)
129 #define SPI_RX_TRIG_8				(2 << 19)
130 #define SPI_RX_TRIG_16				(3 << 19)
131 #define SPI_RX_TRIG_MASK			(3 << 19)
132 #define SPI_IE_TX				(1 << 28)
133 #define SPI_IE_RX				(1 << 29)
134 #define SPI_CONT				(1 << 30)
135 #define SPI_DMA					(1 << 31)
136 #define SPI_DMA_EN				SPI_DMA
137 
138 #define SPI_DMA_BLK				0x024
139 #define SPI_DMA_BLK_SET(x)			(((x) & 0xFFFF) << 0)
140 
141 #define SPI_TX_FIFO				0x108
142 #define SPI_RX_FIFO				0x188
143 #define SPI_INTR_MASK				0x18c
144 #define SPI_INTR_ALL_MASK			(0x1fUL << 25)
145 #define MAX_CHIP_SELECT				4
146 #define SPI_FIFO_DEPTH				64
147 #define DATA_DIR_TX				(1 << 0)
148 #define DATA_DIR_RX				(1 << 1)
149 
150 #define SPI_DMA_TIMEOUT				(msecs_to_jiffies(1000))
151 #define DEFAULT_SPI_DMA_BUF_LEN			(16*1024)
152 #define TX_FIFO_EMPTY_COUNT_MAX			SPI_TX_FIFO_EMPTY_COUNT(0x40)
153 #define RX_FIFO_FULL_COUNT_ZERO			SPI_RX_FIFO_FULL_COUNT(0)
154 #define MAX_HOLD_CYCLES				16
155 #define SPI_DEFAULT_SPEED			25000000
156 
157 struct tegra_spi_soc_data {
158 	bool has_intr_mask_reg;
159 };
160 
161 struct tegra_spi_client_data {
162 	int tx_clk_tap_delay;
163 	int rx_clk_tap_delay;
164 };
165 
166 struct tegra_spi_data {
167 	struct device				*dev;
168 	struct spi_master			*master;
169 	spinlock_t				lock;
170 
171 	struct clk				*clk;
172 	struct reset_control			*rst;
173 	void __iomem				*base;
174 	phys_addr_t				phys;
175 	unsigned				irq;
176 	u32					cur_speed;
177 
178 	struct spi_device			*cur_spi;
179 	struct spi_device			*cs_control;
180 	unsigned				cur_pos;
181 	unsigned				words_per_32bit;
182 	unsigned				bytes_per_word;
183 	unsigned				curr_dma_words;
184 	unsigned				cur_direction;
185 
186 	unsigned				cur_rx_pos;
187 	unsigned				cur_tx_pos;
188 
189 	unsigned				dma_buf_size;
190 	unsigned				max_buf_size;
191 	bool					is_curr_dma_xfer;
192 	bool					use_hw_based_cs;
193 
194 	struct completion			rx_dma_complete;
195 	struct completion			tx_dma_complete;
196 
197 	u32					tx_status;
198 	u32					rx_status;
199 	u32					status_reg;
200 	bool					is_packed;
201 
202 	u32					command1_reg;
203 	u32					dma_control_reg;
204 	u32					def_command1_reg;
205 	u32					def_command2_reg;
206 	u32					spi_cs_timing1;
207 	u32					spi_cs_timing2;
208 	u8					last_used_cs;
209 
210 	struct completion			xfer_completion;
211 	struct spi_transfer			*curr_xfer;
212 	struct dma_chan				*rx_dma_chan;
213 	u32					*rx_dma_buf;
214 	dma_addr_t				rx_dma_phys;
215 	struct dma_async_tx_descriptor		*rx_dma_desc;
216 
217 	struct dma_chan				*tx_dma_chan;
218 	u32					*tx_dma_buf;
219 	dma_addr_t				tx_dma_phys;
220 	struct dma_async_tx_descriptor		*tx_dma_desc;
221 	const struct tegra_spi_soc_data		*soc_data;
222 };
223 
224 static int tegra_spi_runtime_suspend(struct device *dev);
225 static int tegra_spi_runtime_resume(struct device *dev);
226 
227 static inline u32 tegra_spi_readl(struct tegra_spi_data *tspi,
228 		unsigned long reg)
229 {
230 	return readl(tspi->base + reg);
231 }
232 
233 static inline void tegra_spi_writel(struct tegra_spi_data *tspi,
234 		u32 val, unsigned long reg)
235 {
236 	writel(val, tspi->base + reg);
237 
238 	/* Read back a register to make sure that the posted write has completed */
239 	if (reg != SPI_TX_FIFO)
240 		readl(tspi->base + SPI_COMMAND1);
241 }
242 
243 static void tegra_spi_clear_status(struct tegra_spi_data *tspi)
244 {
245 	u32 val;
246 
247 	/* Write 1 to clear status register */
248 	val = tegra_spi_readl(tspi, SPI_TRANS_STATUS);
249 	tegra_spi_writel(tspi, val, SPI_TRANS_STATUS);
250 
251 	/* Clear FIFO status errors, if any */
252 	val = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
253 	if (val & SPI_ERR)
254 		tegra_spi_writel(tspi, SPI_ERR | SPI_FIFO_ERROR,
255 				SPI_FIFO_STATUS);
256 }
257 
258 static unsigned tegra_spi_calculate_curr_xfer_param(
259 	struct spi_device *spi, struct tegra_spi_data *tspi,
260 	struct spi_transfer *t)
261 {
262 	unsigned remain_len = t->len - tspi->cur_pos;
263 	unsigned max_word;
264 	unsigned bits_per_word = t->bits_per_word;
265 	unsigned max_len;
266 	unsigned total_fifo_words;
267 
268 	tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
269 
270 	if ((bits_per_word == 8 || bits_per_word == 16 ||
271 	     bits_per_word == 32) && t->len > 3) {
272 		tspi->is_packed = true;
273 		tspi->words_per_32bit = 32/bits_per_word;
274 	} else {
275 		tspi->is_packed = false;
276 		tspi->words_per_32bit = 1;
277 	}
278 
279 	if (tspi->is_packed) {
280 		max_len = min(remain_len, tspi->max_buf_size);
281 		tspi->curr_dma_words = max_len/tspi->bytes_per_word;
282 		total_fifo_words = (max_len + 3) / 4;
283 	} else {
284 		max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
285 		max_word = min(max_word, tspi->max_buf_size/4);
286 		tspi->curr_dma_words = max_word;
287 		total_fifo_words = max_word;
288 	}
289 	return total_fifo_words;
290 }
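/*
 * Worked example (editorial note, not from the original driver): for an
 * 8-bit, 100-byte transfer the function above picks packed mode, so
 * bytes_per_word = 1, words_per_32bit = 4, curr_dma_words = 100 and
 * total_fifo_words = (100 + 3) / 4 = 25; that fits within SPI_FIFO_DEPTH,
 * so the transfer is done by PIO.  A 12-bit transfer cannot be packed:
 * words_per_32bit = 1, bytes_per_word = 2, and every SPI word occupies a
 * full 32-bit FIFO entry.
 */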
291 
292 static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
293 	struct tegra_spi_data *tspi, struct spi_transfer *t)
294 {
295 	unsigned nbytes;
296 	unsigned tx_empty_count;
297 	u32 fifo_status;
298 	unsigned max_n_32bit;
299 	unsigned i, count;
300 	unsigned int written_words;
301 	unsigned fifo_words_left;
302 	u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
303 
304 	fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
305 	tx_empty_count = SPI_TX_FIFO_EMPTY_COUNT(fifo_status);
306 
307 	if (tspi->is_packed) {
308 		fifo_words_left = tx_empty_count * tspi->words_per_32bit;
309 		written_words = min(fifo_words_left, tspi->curr_dma_words);
310 		nbytes = written_words * tspi->bytes_per_word;
311 		max_n_32bit = DIV_ROUND_UP(nbytes, 4);
312 		for (count = 0; count < max_n_32bit; count++) {
313 			u32 x = 0;
314 
315 			for (i = 0; (i < 4) && nbytes; i++, nbytes--)
316 				x |= (u32)(*tx_buf++) << (i * 8);
317 			tegra_spi_writel(tspi, x, SPI_TX_FIFO);
318 		}
319 
320 		tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
321 	} else {
322 		unsigned int write_bytes;
323 		max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
324 		written_words = max_n_32bit;
325 		nbytes = written_words * tspi->bytes_per_word;
326 		if (nbytes > t->len - tspi->cur_pos)
327 			nbytes = t->len - tspi->cur_pos;
328 		write_bytes = nbytes;
329 		for (count = 0; count < max_n_32bit; count++) {
330 			u32 x = 0;
331 
332 			for (i = 0; nbytes && (i < tspi->bytes_per_word);
333 							i++, nbytes--)
334 				x |= (u32)(*tx_buf++) << (i * 8);
335 			tegra_spi_writel(tspi, x, SPI_TX_FIFO);
336 		}
337 
338 		tspi->cur_tx_pos += write_bytes;
339 	}
340 
341 	return written_words;
342 }
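/*
 * Editorial note: in packed mode the loop above packs client bytes into the
 * FIFO least-significant byte first, e.g. the byte sequence 0x11 0x22 0x33
 * 0x44 becomes the 32-bit FIFO word 0x44332211.
 */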
343 
344 static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
345 		struct tegra_spi_data *tspi, struct spi_transfer *t)
346 {
347 	unsigned rx_full_count;
348 	u32 fifo_status;
349 	unsigned i, count;
350 	unsigned int read_words = 0;
351 	unsigned len;
352 	u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
353 
354 	fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
355 	rx_full_count = SPI_RX_FIFO_FULL_COUNT(fifo_status);
356 	if (tspi->is_packed) {
357 		len = tspi->curr_dma_words * tspi->bytes_per_word;
358 		for (count = 0; count < rx_full_count; count++) {
359 			u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO);
360 
361 			for (i = 0; len && (i < 4); i++, len--)
362 				*rx_buf++ = (x >> i*8) & 0xFF;
363 		}
364 		read_words += tspi->curr_dma_words;
365 		tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
366 	} else {
367 		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
368 		u8 bytes_per_word = tspi->bytes_per_word;
369 		unsigned int read_bytes;
370 
371 		len = rx_full_count * bytes_per_word;
372 		if (len > t->len - tspi->cur_pos)
373 			len = t->len - tspi->cur_pos;
374 		read_bytes = len;
375 		for (count = 0; count < rx_full_count; count++) {
376 			u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO) & rx_mask;
377 
378 			for (i = 0; len && (i < bytes_per_word); i++, len--)
379 				*rx_buf++ = (x >> (i*8)) & 0xFF;
380 		}
381 		read_words += rx_full_count;
382 		tspi->cur_rx_pos += read_bytes;
383 	}
384 
385 	return read_words;
386 }
387 
388 static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
389 		struct tegra_spi_data *tspi, struct spi_transfer *t)
390 {
391 	/* Make the DMA buffer available for reading by the CPU */
392 	dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
393 				tspi->dma_buf_size, DMA_TO_DEVICE);
394 
395 	if (tspi->is_packed) {
396 		unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
397 
398 		memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
399 		tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
400 	} else {
401 		unsigned int i;
402 		unsigned int count;
403 		u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
404 		unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
405 		unsigned int write_bytes;
406 
407 		if (consume > t->len - tspi->cur_pos)
408 			consume = t->len - tspi->cur_pos;
409 		write_bytes = consume;
410 		for (count = 0; count < tspi->curr_dma_words; count++) {
411 			u32 x = 0;
412 
413 			for (i = 0; consume && (i < tspi->bytes_per_word);
414 							i++, consume--)
415 				x |= (u32)(*tx_buf++) << (i * 8);
416 			tspi->tx_dma_buf[count] = x;
417 		}
418 
419 		tspi->cur_tx_pos += write_bytes;
420 	}
421 
422 	/* Make the DMA buffer available for reading by the DMA engine */
423 	dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
424 				tspi->dma_buf_size, DMA_TO_DEVICE);
425 }
426 
427 static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
428 		struct tegra_spi_data *tspi, struct spi_transfer *t)
429 {
430 	/* Make the DMA buffer available for reading by the CPU */
431 	dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
432 		tspi->dma_buf_size, DMA_FROM_DEVICE);
433 
434 	if (tspi->is_packed) {
435 		unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
436 
437 		memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
438 		tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
439 	} else {
440 		unsigned int i;
441 		unsigned int count;
442 		unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
443 		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
444 		unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
445 		unsigned int read_bytes;
446 
447 		if (consume > t->len - tspi->cur_pos)
448 			consume = t->len - tspi->cur_pos;
449 		read_bytes = consume;
450 		for (count = 0; count < tspi->curr_dma_words; count++) {
451 			u32 x = tspi->rx_dma_buf[count] & rx_mask;
452 
453 			for (i = 0; consume && (i < tspi->bytes_per_word);
454 							i++, consume--)
455 				*rx_buf++ = (x >> (i*8)) & 0xFF;
456 		}
457 
458 		tspi->cur_rx_pos += read_bytes;
459 	}
460 
461 	/* Hand the DMA buffer back to the DMA engine */
462 	dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
463 		tspi->dma_buf_size, DMA_FROM_DEVICE);
464 }
465 
466 static void tegra_spi_dma_complete(void *args)
467 {
468 	struct completion *dma_complete = args;
469 
470 	complete(dma_complete);
471 }
472 
473 static int tegra_spi_start_tx_dma(struct tegra_spi_data *tspi, int len)
474 {
475 	reinit_completion(&tspi->tx_dma_complete);
476 	tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
477 				tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
478 				DMA_PREP_INTERRUPT |  DMA_CTRL_ACK);
479 	if (!tspi->tx_dma_desc) {
480 		dev_err(tspi->dev, "Not able to get desc for Tx\n");
481 		return -EIO;
482 	}
483 
484 	tspi->tx_dma_desc->callback = tegra_spi_dma_complete;
485 	tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;
486 
487 	dmaengine_submit(tspi->tx_dma_desc);
488 	dma_async_issue_pending(tspi->tx_dma_chan);
489 	return 0;
490 }
491 
492 static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
493 {
494 	reinit_completion(&tspi->rx_dma_complete);
495 	tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
496 				tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
497 				DMA_PREP_INTERRUPT |  DMA_CTRL_ACK);
498 	if (!tspi->rx_dma_desc) {
499 		dev_err(tspi->dev, "Not able to get desc for Rx\n");
500 		return -EIO;
501 	}
502 
503 	tspi->rx_dma_desc->callback = tegra_spi_dma_complete;
504 	tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;
505 
506 	dmaengine_submit(tspi->rx_dma_desc);
507 	dma_async_issue_pending(tspi->rx_dma_chan);
508 	return 0;
509 }
510 
511 static int tegra_spi_flush_fifos(struct tegra_spi_data *tspi)
512 {
513 	unsigned long timeout = jiffies + HZ;
514 	u32 status;
515 
516 	status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
517 	if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
518 		status |= SPI_RX_FIFO_FLUSH | SPI_TX_FIFO_FLUSH;
519 		tegra_spi_writel(tspi, status, SPI_FIFO_STATUS);
520 		while ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
521 			status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
522 			if (time_after(jiffies, timeout)) {
523 				dev_err(tspi->dev,
524 					"timeout waiting for fifo flush\n");
525 				return -EIO;
526 			}
527 
528 			udelay(1);
529 		}
530 	}
531 
532 	return 0;
533 }
534 
535 static int tegra_spi_start_dma_based_transfer(
536 		struct tegra_spi_data *tspi, struct spi_transfer *t)
537 {
538 	u32 val;
539 	unsigned int len;
540 	int ret = 0;
541 	u8 dma_burst;
542 	struct dma_slave_config dma_sconfig = {0};
543 
544 	val = SPI_DMA_BLK_SET(tspi->curr_dma_words - 1);
545 	tegra_spi_writel(tspi, val, SPI_DMA_BLK);
546 
547 	if (tspi->is_packed)
548 		len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
549 					4) * 4;
550 	else
551 		len = tspi->curr_dma_words * 4;
552 
553 	/* Set attention level based on length of transfer */
554 	if (len & 0xF) {
555 		val |= SPI_TX_TRIG_1 | SPI_RX_TRIG_1;
556 		dma_burst = 1;
557 	} else if (((len) >> 4) & 0x1) {
558 		val |= SPI_TX_TRIG_4 | SPI_RX_TRIG_4;
559 		dma_burst = 4;
560 	} else {
561 		val |= SPI_TX_TRIG_8 | SPI_RX_TRIG_8;
562 		dma_burst = 8;
563 	}
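	/*
	 * Editorial example of the trigger/burst selection above: len = 100
	 * (0x64) has a non-zero low nibble, so trigger level and burst are
	 * 1 word; len = 48 (0x30) is a multiple of 16 but not of 32, giving
	 * 4; len = 64 (0x40) is a multiple of 32, giving 8.
	 */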
564 
565 	if (!tspi->soc_data->has_intr_mask_reg) {
566 		if (tspi->cur_direction & DATA_DIR_TX)
567 			val |= SPI_IE_TX;
568 
569 		if (tspi->cur_direction & DATA_DIR_RX)
570 			val |= SPI_IE_RX;
571 	}
572 
573 	tegra_spi_writel(tspi, val, SPI_DMA_CTL);
574 	tspi->dma_control_reg = val;
575 
576 	dma_sconfig.device_fc = true;
577 	if (tspi->cur_direction & DATA_DIR_TX) {
578 		dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
579 		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
580 		dma_sconfig.dst_maxburst = dma_burst;
581 		ret = dmaengine_slave_config(tspi->tx_dma_chan, &dma_sconfig);
582 		if (ret < 0) {
583 			dev_err(tspi->dev,
584 				"DMA slave config failed: %d\n", ret);
585 			return ret;
586 		}
587 
588 		tegra_spi_copy_client_txbuf_to_spi_txbuf(tspi, t);
589 		ret = tegra_spi_start_tx_dma(tspi, len);
590 		if (ret < 0) {
591 			dev_err(tspi->dev,
592 				"Starting tx dma failed, err %d\n", ret);
593 			return ret;
594 		}
595 	}
596 
597 	if (tspi->cur_direction & DATA_DIR_RX) {
598 		dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
599 		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
600 		dma_sconfig.src_maxburst = dma_burst;
601 		ret = dmaengine_slave_config(tspi->rx_dma_chan, &dma_sconfig);
602 		if (ret < 0) {
603 			dev_err(tspi->dev,
604 				"DMA slave config failed: %d\n", ret);
605 			return ret;
606 		}
607 
608 		/* Hand the DMA buffer back to the DMA engine */
609 		dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
610 				tspi->dma_buf_size, DMA_FROM_DEVICE);
611 
612 		ret = tegra_spi_start_rx_dma(tspi, len);
613 		if (ret < 0) {
614 			dev_err(tspi->dev,
615 				"Starting rx dma failed, err %d\n", ret);
616 			if (tspi->cur_direction & DATA_DIR_TX)
617 				dmaengine_terminate_all(tspi->tx_dma_chan);
618 			return ret;
619 		}
620 	}
621 	tspi->is_curr_dma_xfer = true;
622 	tspi->dma_control_reg = val;
623 
624 	val |= SPI_DMA_EN;
625 	tegra_spi_writel(tspi, val, SPI_DMA_CTL);
626 	return ret;
627 }
628 
629 static int tegra_spi_start_cpu_based_transfer(
630 		struct tegra_spi_data *tspi, struct spi_transfer *t)
631 {
632 	u32 val;
633 	unsigned cur_words;
634 
635 	if (tspi->cur_direction & DATA_DIR_TX)
636 		cur_words = tegra_spi_fill_tx_fifo_from_client_txbuf(tspi, t);
637 	else
638 		cur_words = tspi->curr_dma_words;
639 
640 	val = SPI_DMA_BLK_SET(cur_words - 1);
641 	tegra_spi_writel(tspi, val, SPI_DMA_BLK);
642 
643 	val = 0;
644 	if (tspi->cur_direction & DATA_DIR_TX)
645 		val |= SPI_IE_TX;
646 
647 	if (tspi->cur_direction & DATA_DIR_RX)
648 		val |= SPI_IE_RX;
649 
650 	tegra_spi_writel(tspi, val, SPI_DMA_CTL);
651 	tspi->dma_control_reg = val;
652 
653 	tspi->is_curr_dma_xfer = false;
654 
655 	val = tspi->command1_reg;
656 	val |= SPI_PIO;
657 	tegra_spi_writel(tspi, val, SPI_COMMAND1);
658 	return 0;
659 }
660 
661 static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
662 			bool dma_to_memory)
663 {
664 	struct dma_chan *dma_chan;
665 	u32 *dma_buf;
666 	dma_addr_t dma_phys;
667 
668 	dma_chan = dma_request_chan(tspi->dev, dma_to_memory ? "rx" : "tx");
669 	if (IS_ERR(dma_chan))
670 		return dev_err_probe(tspi->dev, PTR_ERR(dma_chan),
671 				     "Dma channel is not available\n");
672 
673 	dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
674 				&dma_phys, GFP_KERNEL);
675 	if (!dma_buf) {
676 		dev_err(tspi->dev, "Not able to allocate the DMA buffer\n");
677 		dma_release_channel(dma_chan);
678 		return -ENOMEM;
679 	}
680 
681 	if (dma_to_memory) {
682 		tspi->rx_dma_chan = dma_chan;
683 		tspi->rx_dma_buf = dma_buf;
684 		tspi->rx_dma_phys = dma_phys;
685 	} else {
686 		tspi->tx_dma_chan = dma_chan;
687 		tspi->tx_dma_buf = dma_buf;
688 		tspi->tx_dma_phys = dma_phys;
689 	}
690 	return 0;
691 }
692 
693 static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
694 	bool dma_to_memory)
695 {
696 	u32 *dma_buf;
697 	dma_addr_t dma_phys;
698 	struct dma_chan *dma_chan;
699 
700 	if (dma_to_memory) {
701 		dma_buf = tspi->rx_dma_buf;
702 		dma_chan = tspi->rx_dma_chan;
703 		dma_phys = tspi->rx_dma_phys;
704 		tspi->rx_dma_chan = NULL;
705 		tspi->rx_dma_buf = NULL;
706 	} else {
707 		dma_buf = tspi->tx_dma_buf;
708 		dma_chan = tspi->tx_dma_chan;
709 		dma_phys = tspi->tx_dma_phys;
710 		tspi->tx_dma_buf = NULL;
711 		tspi->tx_dma_chan = NULL;
712 	}
713 	if (!dma_chan)
714 		return;
715 
716 	dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
717 	dma_release_channel(dma_chan);
718 }
719 
720 static int tegra_spi_set_hw_cs_timing(struct spi_device *spi,
721 				      struct spi_delay *setup,
722 				      struct spi_delay *hold,
723 				      struct spi_delay *inactive)
724 {
725 	struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
726 	u8 setup_dly, hold_dly, inactive_dly;
727 	u32 setup_hold;
728 	u32 spi_cs_timing;
729 	u32 inactive_cycles;
730 	u8 cs_state;
731 
732 	if ((setup && setup->unit != SPI_DELAY_UNIT_SCK) ||
733 	    (hold && hold->unit != SPI_DELAY_UNIT_SCK) ||
734 	    (inactive && inactive->unit != SPI_DELAY_UNIT_SCK)) {
735 		dev_err(&spi->dev,
736 			"Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n",
737 			SPI_DELAY_UNIT_SCK);
738 		return -EINVAL;
739 	}
740 
741 	setup_dly = setup ? setup->value : 0;
742 	hold_dly = hold ? hold->value : 0;
743 	inactive_dly = inactive ? inactive->value : 0;
744 
745 	setup_dly = min_t(u8, setup_dly, MAX_SETUP_HOLD_CYCLES);
746 	hold_dly = min_t(u8, hold_dly, MAX_SETUP_HOLD_CYCLES);
747 	if (setup_dly && hold_dly) {
748 		setup_hold = SPI_SETUP_HOLD(setup_dly - 1, hold_dly - 1);
749 		spi_cs_timing = SPI_CS_SETUP_HOLD(tspi->spi_cs_timing1,
750 						  spi->chip_select,
751 						  setup_hold);
752 		if (tspi->spi_cs_timing1 != spi_cs_timing) {
753 			tspi->spi_cs_timing1 = spi_cs_timing;
754 			tegra_spi_writel(tspi, spi_cs_timing, SPI_CS_TIMING1);
755 		}
756 	}
757 
758 	inactive_cycles = min_t(u8, inactive_dly, MAX_INACTIVE_CYCLES);
759 	if (inactive_cycles)
760 		inactive_cycles--;
761 	cs_state = inactive_cycles ? 0 : 1;
762 	spi_cs_timing = tspi->spi_cs_timing2;
763 	SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(spi_cs_timing, spi->chip_select,
764 					  cs_state);
765 	SPI_SET_CYCLES_BETWEEN_PACKETS(spi_cs_timing, spi->chip_select,
766 				       inactive_cycles);
767 	if (tspi->spi_cs_timing2 != spi_cs_timing) {
768 		tspi->spi_cs_timing2 = spi_cs_timing;
769 		tegra_spi_writel(tspi, spi_cs_timing, SPI_CS_TIMING2);
770 	}
771 
772 	return 0;
773 }
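/*
 * Worked example (editorial note): with setup = 3 and hold = 2 SCK cycles
 * requested for chip select 1, the function above writes
 * SPI_SETUP_HOLD(2, 1) = 0x21 into byte 1 (bits [15:8]) of SPI_CS_TIMING1.
 * Values are clamped to MAX_SETUP_HOLD_CYCLES, and any delay expressed in a
 * unit other than SPI_DELAY_UNIT_SCK is rejected with -EINVAL.
 */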
774 
775 static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
776 					struct spi_transfer *t,
777 					bool is_first_of_msg,
778 					bool is_single_xfer)
779 {
780 	struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
781 	struct tegra_spi_client_data *cdata = spi->controller_data;
782 	u32 speed = t->speed_hz;
783 	u8 bits_per_word = t->bits_per_word;
784 	u32 command1, command2;
785 	int req_mode;
786 	u32 tx_tap = 0, rx_tap = 0;
787 
788 	if (speed != tspi->cur_speed) {
789 		clk_set_rate(tspi->clk, speed);
790 		tspi->cur_speed = speed;
791 	}
792 
793 	tspi->cur_spi = spi;
794 	tspi->cur_pos = 0;
795 	tspi->cur_rx_pos = 0;
796 	tspi->cur_tx_pos = 0;
797 	tspi->curr_xfer = t;
798 
799 	if (is_first_of_msg) {
800 		tegra_spi_clear_status(tspi);
801 
802 		command1 = tspi->def_command1_reg;
803 		command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
804 
805 		command1 &= ~SPI_CONTROL_MODE_MASK;
806 		req_mode = spi->mode & 0x3;
807 		if (req_mode == SPI_MODE_0)
808 			command1 |= SPI_CONTROL_MODE_0;
809 		else if (req_mode == SPI_MODE_1)
810 			command1 |= SPI_CONTROL_MODE_1;
811 		else if (req_mode == SPI_MODE_2)
812 			command1 |= SPI_CONTROL_MODE_2;
813 		else if (req_mode == SPI_MODE_3)
814 			command1 |= SPI_CONTROL_MODE_3;
815 
816 		if (spi->mode & SPI_LSB_FIRST)
817 			command1 |= SPI_LSBIT_FE;
818 		else
819 			command1 &= ~SPI_LSBIT_FE;
820 
821 		if (spi->mode & SPI_3WIRE)
822 			command1 |= SPI_BIDIROE;
823 		else
824 			command1 &= ~SPI_BIDIROE;
825 
826 		if (tspi->cs_control) {
827 			if (tspi->cs_control != spi)
828 				tegra_spi_writel(tspi, command1, SPI_COMMAND1);
829 			tspi->cs_control = NULL;
830 		} else
831 			tegra_spi_writel(tspi, command1, SPI_COMMAND1);
832 
833 		/* GPIO based chip select control */
834 		if (spi->cs_gpiod)
835 			gpiod_set_value(spi->cs_gpiod, 1);
836 
837 		if (is_single_xfer && !(t->cs_change)) {
838 			tspi->use_hw_based_cs = true;
839 			command1 &= ~(SPI_CS_SW_HW | SPI_CS_SW_VAL);
840 		} else {
841 			tspi->use_hw_based_cs = false;
842 			command1 |= SPI_CS_SW_HW;
843 			if (spi->mode & SPI_CS_HIGH)
844 				command1 |= SPI_CS_SW_VAL;
845 			else
846 				command1 &= ~SPI_CS_SW_VAL;
847 		}
848 
849 		if (tspi->last_used_cs != spi->chip_select) {
850 			if (cdata && cdata->tx_clk_tap_delay)
851 				tx_tap = cdata->tx_clk_tap_delay;
852 			if (cdata && cdata->rx_clk_tap_delay)
853 				rx_tap = cdata->rx_clk_tap_delay;
854 			command2 = SPI_TX_TAP_DELAY(tx_tap) |
855 				   SPI_RX_TAP_DELAY(rx_tap);
856 			if (command2 != tspi->def_command2_reg)
857 				tegra_spi_writel(tspi, command2, SPI_COMMAND2);
858 			tspi->last_used_cs = spi->chip_select;
859 		}
860 
861 	} else {
862 		command1 = tspi->command1_reg;
863 		command1 &= ~SPI_BIT_LENGTH(~0);
864 		command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
865 	}
866 
867 	return command1;
868 }
869 
870 static int tegra_spi_start_transfer_one(struct spi_device *spi,
871 		struct spi_transfer *t, u32 command1)
872 {
873 	struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
874 	unsigned total_fifo_words;
875 	int ret;
876 
877 	total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);
878 
879 	if (t->rx_nbits == SPI_NBITS_DUAL || t->tx_nbits == SPI_NBITS_DUAL)
880 		command1 |= SPI_BOTH_EN_BIT;
881 	else
882 		command1 &= ~SPI_BOTH_EN_BIT;
883 
884 	if (tspi->is_packed)
885 		command1 |= SPI_PACKED;
886 	else
887 		command1 &= ~SPI_PACKED;
888 
889 	command1 &= ~(SPI_CS_SEL_MASK | SPI_TX_EN | SPI_RX_EN);
890 	tspi->cur_direction = 0;
891 	if (t->rx_buf) {
892 		command1 |= SPI_RX_EN;
893 		tspi->cur_direction |= DATA_DIR_RX;
894 	}
895 	if (t->tx_buf) {
896 		command1 |= SPI_TX_EN;
897 		tspi->cur_direction |= DATA_DIR_TX;
898 	}
899 	command1 |= SPI_CS_SEL(spi->chip_select);
900 	tegra_spi_writel(tspi, command1, SPI_COMMAND1);
901 	tspi->command1_reg = command1;
902 
903 	dev_dbg(tspi->dev, "Default command1 0x%x, written 0x%x\n",
904 		tspi->def_command1_reg, (unsigned)command1);
905 
906 	ret = tegra_spi_flush_fifos(tspi);
907 	if (ret < 0)
908 		return ret;
909 	if (total_fifo_words > SPI_FIFO_DEPTH)
910 		ret = tegra_spi_start_dma_based_transfer(tspi, t);
911 	else
912 		ret = tegra_spi_start_cpu_based_transfer(tspi, t);
913 	return ret;
914 }
915 
916 static struct tegra_spi_client_data
917 	*tegra_spi_parse_cdata_dt(struct spi_device *spi)
918 {
919 	struct tegra_spi_client_data *cdata;
920 	struct device_node *slave_np;
921 
922 	slave_np = spi->dev.of_node;
923 	if (!slave_np) {
924 		dev_dbg(&spi->dev, "device node not found\n");
925 		return NULL;
926 	}
927 
928 	cdata = kzalloc(sizeof(*cdata), GFP_KERNEL);
929 	if (!cdata)
930 		return NULL;
931 
932 	of_property_read_u32(slave_np, "nvidia,tx-clk-tap-delay",
933 			     &cdata->tx_clk_tap_delay);
934 	of_property_read_u32(slave_np, "nvidia,rx-clk-tap-delay",
935 			     &cdata->rx_clk_tap_delay);
936 	return cdata;
937 }
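/*
 * Editorial illustration (property values are hypothetical): the tap delays
 * parsed above come from optional properties on the SPI child node, e.g.:
 *
 *	flash@0 {
 *		...
 *		nvidia,tx-clk-tap-delay = <0x10>;
 *		nvidia,rx-clk-tap-delay = <0x07>;
 *	};
 */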
938 
939 static void tegra_spi_cleanup(struct spi_device *spi)
940 {
941 	struct tegra_spi_client_data *cdata = spi->controller_data;
942 
943 	spi->controller_data = NULL;
944 	if (spi->dev.of_node)
945 		kfree(cdata);
946 }
947 
948 static int tegra_spi_setup(struct spi_device *spi)
949 {
950 	struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
951 	struct tegra_spi_client_data *cdata = spi->controller_data;
952 	u32 val;
953 	unsigned long flags;
954 	int ret;
955 
956 	dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
957 		spi->bits_per_word,
958 		spi->mode & SPI_CPOL ? "" : "~",
959 		spi->mode & SPI_CPHA ? "" : "~",
960 		spi->max_speed_hz);
961 
962 	if (!cdata) {
963 		cdata = tegra_spi_parse_cdata_dt(spi);
964 		spi->controller_data = cdata;
965 	}
966 
967 	ret = pm_runtime_get_sync(tspi->dev);
968 	if (ret < 0) {
969 		pm_runtime_put_noidle(tspi->dev);
970 		dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
971 		if (cdata)
972 			tegra_spi_cleanup(spi);
973 		return ret;
974 	}
975 
976 	if (tspi->soc_data->has_intr_mask_reg) {
977 		val = tegra_spi_readl(tspi, SPI_INTR_MASK);
978 		val &= ~SPI_INTR_ALL_MASK;
979 		tegra_spi_writel(tspi, val, SPI_INTR_MASK);
980 	}
981 
982 	spin_lock_irqsave(&tspi->lock, flags);
983 	/* GPIO based chip select control */
984 	if (spi->cs_gpiod)
985 		gpiod_set_value(spi->cs_gpiod, 0);
986 
987 	val = tspi->def_command1_reg;
988 	if (spi->mode & SPI_CS_HIGH)
989 		val &= ~SPI_CS_POL_INACTIVE(spi->chip_select);
990 	else
991 		val |= SPI_CS_POL_INACTIVE(spi->chip_select);
992 	tspi->def_command1_reg = val;
993 	tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
994 	spin_unlock_irqrestore(&tspi->lock, flags);
995 
996 	pm_runtime_put(tspi->dev);
997 	return 0;
998 }
999 
1000 static void tegra_spi_transfer_end(struct spi_device *spi)
1001 {
1002 	struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
1003 	int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
1004 
1005 	/* GPIO based chip select control */
1006 	if (spi->cs_gpiod)
1007 		gpiod_set_value(spi->cs_gpiod, 0);
1008 
1009 	if (!tspi->use_hw_based_cs) {
1010 		if (cs_val)
1011 			tspi->command1_reg |= SPI_CS_SW_VAL;
1012 		else
1013 			tspi->command1_reg &= ~SPI_CS_SW_VAL;
1014 		tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
1015 	}
1016 
1017 	tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
1018 }
1019 
1020 static void tegra_spi_dump_regs(struct tegra_spi_data *tspi)
1021 {
1022 	dev_dbg(tspi->dev, "============ SPI REGISTER DUMP ============\n");
1023 	dev_dbg(tspi->dev, "Command1:    0x%08x | Command2:    0x%08x\n",
1024 		tegra_spi_readl(tspi, SPI_COMMAND1),
1025 		tegra_spi_readl(tspi, SPI_COMMAND2));
1026 	dev_dbg(tspi->dev, "DMA_CTL:     0x%08x | DMA_BLK:     0x%08x\n",
1027 		tegra_spi_readl(tspi, SPI_DMA_CTL),
1028 		tegra_spi_readl(tspi, SPI_DMA_BLK));
1029 	dev_dbg(tspi->dev, "TRANS_STAT:  0x%08x | FIFO_STATUS: 0x%08x\n",
1030 		tegra_spi_readl(tspi, SPI_TRANS_STATUS),
1031 		tegra_spi_readl(tspi, SPI_FIFO_STATUS));
1032 }
1033 
1034 static int tegra_spi_transfer_one_message(struct spi_master *master,
1035 			struct spi_message *msg)
1036 {
1037 	bool is_first_msg = true;
1038 	struct tegra_spi_data *tspi = spi_master_get_devdata(master);
1039 	struct spi_transfer *xfer;
1040 	struct spi_device *spi = msg->spi;
1041 	int ret;
1042 	bool skip = false;
1043 	int single_xfer;
1044 
1045 	msg->status = 0;
1046 	msg->actual_length = 0;
1047 
1048 	single_xfer = list_is_singular(&msg->transfers);
1049 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1050 		u32 cmd1;
1051 
1052 		reinit_completion(&tspi->xfer_completion);
1053 
1054 		cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg,
1055 						    single_xfer);
1056 
1057 		if (!xfer->len) {
1058 			ret = 0;
1059 			skip = true;
1060 			goto complete_xfer;
1061 		}
1062 
1063 		ret = tegra_spi_start_transfer_one(spi, xfer, cmd1);
1064 		if (ret < 0) {
1065 			dev_err(tspi->dev,
1066 				"spi can not start transfer, err %d\n", ret);
1067 			goto complete_xfer;
1068 		}
1069 
1070 		is_first_msg = false;
1071 		ret = wait_for_completion_timeout(&tspi->xfer_completion,
1072 						SPI_DMA_TIMEOUT);
1073 		if (WARN_ON(ret == 0)) {
1074 			dev_err(tspi->dev, "spi transfer timeout\n");
1075 			if (tspi->is_curr_dma_xfer &&
1076 			    (tspi->cur_direction & DATA_DIR_TX))
1077 				dmaengine_terminate_all(tspi->tx_dma_chan);
1078 			if (tspi->is_curr_dma_xfer &&
1079 			    (tspi->cur_direction & DATA_DIR_RX))
1080 				dmaengine_terminate_all(tspi->rx_dma_chan);
1081 			ret = -EIO;
1082 			tegra_spi_dump_regs(tspi);
1083 			tegra_spi_flush_fifos(tspi);
1084 			reset_control_assert(tspi->rst);
1085 			udelay(2);
1086 			reset_control_deassert(tspi->rst);
1087 			tspi->last_used_cs = master->num_chipselect + 1;
1088 			goto complete_xfer;
1089 		}
1090 
1091 		if (tspi->tx_status ||  tspi->rx_status) {
1092 			dev_err(tspi->dev, "Error in Transfer\n");
1093 			ret = -EIO;
1094 			tegra_spi_dump_regs(tspi);
1095 			goto complete_xfer;
1096 		}
1097 		msg->actual_length += xfer->len;
1098 
1099 complete_xfer:
1100 		if (ret < 0 || skip) {
1101 			tegra_spi_transfer_end(spi);
1102 			spi_transfer_delay_exec(xfer);
1103 			goto exit;
1104 		} else if (list_is_last(&xfer->transfer_list,
1105 					&msg->transfers)) {
1106 			if (xfer->cs_change)
1107 				tspi->cs_control = spi;
1108 			else {
1109 				tegra_spi_transfer_end(spi);
1110 				spi_transfer_delay_exec(xfer);
1111 			}
1112 		} else if (xfer->cs_change) {
1113 			tegra_spi_transfer_end(spi);
1114 			spi_transfer_delay_exec(xfer);
1115 		}
1116 
1117 	}
1118 	ret = 0;
1119 exit:
1120 	msg->status = ret;
1121 	spi_finalize_current_message(master);
1122 	return ret;
1123 }
1124 
1125 static irqreturn_t handle_cpu_based_xfer(struct tegra_spi_data *tspi)
1126 {
1127 	struct spi_transfer *t = tspi->curr_xfer;
1128 	unsigned long flags;
1129 
1130 	spin_lock_irqsave(&tspi->lock, flags);
1131 	if (tspi->tx_status ||  tspi->rx_status) {
1132 		dev_err(tspi->dev, "CpuXfer ERROR bit set 0x%x\n",
1133 			tspi->status_reg);
1134 		dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n",
1135 			tspi->command1_reg, tspi->dma_control_reg);
1136 		tegra_spi_dump_regs(tspi);
1137 		tegra_spi_flush_fifos(tspi);
1138 		complete(&tspi->xfer_completion);
1139 		spin_unlock_irqrestore(&tspi->lock, flags);
1140 		reset_control_assert(tspi->rst);
1141 		udelay(2);
1142 		reset_control_deassert(tspi->rst);
1143 		return IRQ_HANDLED;
1144 	}
1145 
1146 	if (tspi->cur_direction & DATA_DIR_RX)
1147 		tegra_spi_read_rx_fifo_to_client_rxbuf(tspi, t);
1148 
1149 	if (tspi->cur_direction & DATA_DIR_TX)
1150 		tspi->cur_pos = tspi->cur_tx_pos;
1151 	else
1152 		tspi->cur_pos = tspi->cur_rx_pos;
1153 
1154 	if (tspi->cur_pos == t->len) {
1155 		complete(&tspi->xfer_completion);
1156 		goto exit;
1157 	}
1158 
1159 	tegra_spi_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
1160 	tegra_spi_start_cpu_based_transfer(tspi, t);
1161 exit:
1162 	spin_unlock_irqrestore(&tspi->lock, flags);
1163 	return IRQ_HANDLED;
1164 }
1165 
1166 static irqreturn_t handle_dma_based_xfer(struct tegra_spi_data *tspi)
1167 {
1168 	struct spi_transfer *t = tspi->curr_xfer;
1169 	long wait_status;
1170 	int err = 0;
1171 	unsigned total_fifo_words;
1172 	unsigned long flags;
1173 
1174 	/* Abort DMAs on any error */
1175 	if (tspi->cur_direction & DATA_DIR_TX) {
1176 		if (tspi->tx_status) {
1177 			dmaengine_terminate_all(tspi->tx_dma_chan);
1178 			err += 1;
1179 		} else {
1180 			wait_status = wait_for_completion_interruptible_timeout(
1181 				&tspi->tx_dma_complete, SPI_DMA_TIMEOUT);
1182 			if (wait_status <= 0) {
1183 				dmaengine_terminate_all(tspi->tx_dma_chan);
1184 				dev_err(tspi->dev, "TxDma Xfer failed\n");
1185 				err += 1;
1186 			}
1187 		}
1188 	}
1189 
1190 	if (tspi->cur_direction & DATA_DIR_RX) {
1191 		if (tspi->rx_status) {
1192 			dmaengine_terminate_all(tspi->rx_dma_chan);
1193 			err += 2;
1194 		} else {
1195 			wait_status = wait_for_completion_interruptible_timeout(
1196 				&tspi->rx_dma_complete, SPI_DMA_TIMEOUT);
1197 			if (wait_status <= 0) {
1198 				dmaengine_terminate_all(tspi->rx_dma_chan);
1199 				dev_err(tspi->dev, "RxDma Xfer failed\n");
1200 				err += 2;
1201 			}
1202 		}
1203 	}
1204 
1205 	spin_lock_irqsave(&tspi->lock, flags);
1206 	if (err) {
1207 		dev_err(tspi->dev, "DmaXfer: ERROR bit set 0x%x\n",
1208 			tspi->status_reg);
1209 		dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n",
1210 			tspi->command1_reg, tspi->dma_control_reg);
1211 		tegra_spi_dump_regs(tspi);
1212 		tegra_spi_flush_fifos(tspi);
1213 		complete(&tspi->xfer_completion);
1214 		spin_unlock_irqrestore(&tspi->lock, flags);
1215 		reset_control_assert(tspi->rst);
1216 		udelay(2);
1217 		reset_control_deassert(tspi->rst);
1218 		return IRQ_HANDLED;
1219 	}
1220 
1221 	if (tspi->cur_direction & DATA_DIR_RX)
1222 		tegra_spi_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
1223 
1224 	if (tspi->cur_direction & DATA_DIR_TX)
1225 		tspi->cur_pos = tspi->cur_tx_pos;
1226 	else
1227 		tspi->cur_pos = tspi->cur_rx_pos;
1228 
1229 	if (tspi->cur_pos == t->len) {
1230 		complete(&tspi->xfer_completion);
1231 		goto exit;
1232 	}
1233 
1234 	/* Continue transfer in current message */
1235 	total_fifo_words = tegra_spi_calculate_curr_xfer_param(tspi->cur_spi,
1236 							tspi, t);
1237 	if (total_fifo_words > SPI_FIFO_DEPTH)
1238 		err = tegra_spi_start_dma_based_transfer(tspi, t);
1239 	else
1240 		err = tegra_spi_start_cpu_based_transfer(tspi, t);
1241 
1242 exit:
1243 	spin_unlock_irqrestore(&tspi->lock, flags);
1244 	return IRQ_HANDLED;
1245 }
1246 
1247 static irqreturn_t tegra_spi_isr_thread(int irq, void *context_data)
1248 {
1249 	struct tegra_spi_data *tspi = context_data;
1250 
1251 	if (!tspi->is_curr_dma_xfer)
1252 		return handle_cpu_based_xfer(tspi);
1253 	return handle_dma_based_xfer(tspi);
1254 }
1255 
1256 static irqreturn_t tegra_spi_isr(int irq, void *context_data)
1257 {
1258 	struct tegra_spi_data *tspi = context_data;
1259 
1260 	tspi->status_reg = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
1261 	if (tspi->cur_direction & DATA_DIR_TX)
1262 		tspi->tx_status = tspi->status_reg &
1263 					(SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF);
1264 
1265 	if (tspi->cur_direction & DATA_DIR_RX)
1266 		tspi->rx_status = tspi->status_reg &
1267 					(SPI_RX_FIFO_OVF | SPI_RX_FIFO_UNF);
1268 	tegra_spi_clear_status(tspi);
1269 
1270 	return IRQ_WAKE_THREAD;
1271 }
1272 
1273 static struct tegra_spi_soc_data tegra114_spi_soc_data = {
1274 	.has_intr_mask_reg = false,
1275 };
1276 
1277 static struct tegra_spi_soc_data tegra124_spi_soc_data = {
1278 	.has_intr_mask_reg = false,
1279 };
1280 
1281 static struct tegra_spi_soc_data tegra210_spi_soc_data = {
1282 	.has_intr_mask_reg = true,
1283 };
1284 
1285 static const struct of_device_id tegra_spi_of_match[] = {
1286 	{
1287 		.compatible = "nvidia,tegra114-spi",
1288 		.data	    = &tegra114_spi_soc_data,
1289 	}, {
1290 		.compatible = "nvidia,tegra124-spi",
1291 		.data	    = &tegra124_spi_soc_data,
1292 	}, {
1293 		.compatible = "nvidia,tegra210-spi",
1294 		.data	    = &tegra210_spi_soc_data,
1295 	},
1296 	{}
1297 };
1298 MODULE_DEVICE_TABLE(of, tegra_spi_of_match);
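/*
 * Editorial illustration of a matching device tree node (values and unit
 * address are hypothetical); the driver expects a "spi" clock and reset,
 * "rx"/"tx" DMA channels, and honours an optional "spi-max-frequency":
 *
 *	spi@7000d400 {
 *		compatible = "nvidia,tegra114-spi";
 *		...
 *		clock-names = "spi";
 *		reset-names = "spi";
 *		dma-names = "rx", "tx";
 *		spi-max-frequency = <25000000>;
 *	};
 */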
1299 
1300 static int tegra_spi_probe(struct platform_device *pdev)
1301 {
1302 	struct spi_master	*master;
1303 	struct tegra_spi_data	*tspi;
1304 	struct resource		*r;
1305 	int ret, spi_irq;
1306 	int bus_num;
1307 
1308 	master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
1309 	if (!master) {
1310 		dev_err(&pdev->dev, "master allocation failed\n");
1311 		return -ENOMEM;
1312 	}
1313 	platform_set_drvdata(pdev, master);
1314 	tspi = spi_master_get_devdata(master);
1315 
1316 	if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
1317 				 &master->max_speed_hz))
1318 		master->max_speed_hz = SPI_DEFAULT_SPEED; /* 25 MHz */
1319 
1320 	/* the spi->mode bits understood by this driver: */
1321 	master->use_gpio_descriptors = true;
1322 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
1323 			    SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE;
1324 	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1325 	master->setup = tegra_spi_setup;
1326 	master->cleanup = tegra_spi_cleanup;
1327 	master->transfer_one_message = tegra_spi_transfer_one_message;
1328 	master->set_cs_timing = tegra_spi_set_hw_cs_timing;
1329 	master->num_chipselect = MAX_CHIP_SELECT;
1330 	master->auto_runtime_pm = true;
1331 	bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
1332 	if (bus_num >= 0)
1333 		master->bus_num = bus_num;
1334 
1335 	tspi->master = master;
1336 	tspi->dev = &pdev->dev;
1337 	spin_lock_init(&tspi->lock);
1338 
1339 	tspi->soc_data = of_device_get_match_data(&pdev->dev);
1340 	if (!tspi->soc_data) {
1341 		dev_err(&pdev->dev, "unsupported tegra\n");
1342 		ret = -ENODEV;
1343 		goto exit_free_master;
1344 	}
1345 
1346 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1347 	tspi->base = devm_ioremap_resource(&pdev->dev, r);
1348 	if (IS_ERR(tspi->base)) {
1349 		ret = PTR_ERR(tspi->base);
1350 		goto exit_free_master;
1351 	}
1352 	tspi->phys = r->start;
1353 
1354 	spi_irq = platform_get_irq(pdev, 0);
1355 	tspi->irq = spi_irq;
1356 
1357 	tspi->clk = devm_clk_get(&pdev->dev, "spi");
1358 	if (IS_ERR(tspi->clk)) {
1359 		dev_err(&pdev->dev, "can not get clock\n");
1360 		ret = PTR_ERR(tspi->clk);
1361 		goto exit_free_master;
1362 	}
1363 
1364 	tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
1365 	if (IS_ERR(tspi->rst)) {
1366 		dev_err(&pdev->dev, "can not get reset\n");
1367 		ret = PTR_ERR(tspi->rst);
1368 		goto exit_free_master;
1369 	}
1370 
1371 	tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
1372 	tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
1373 
1374 	ret = tegra_spi_init_dma_param(tspi, true);
1375 	if (ret < 0)
1376 		goto exit_free_master;
1377 	ret = tegra_spi_init_dma_param(tspi, false);
1378 	if (ret < 0)
1379 		goto exit_rx_dma_free;
1380 	tspi->max_buf_size = tspi->dma_buf_size;
1381 	init_completion(&tspi->tx_dma_complete);
1382 	init_completion(&tspi->rx_dma_complete);
1383 
1384 	init_completion(&tspi->xfer_completion);
1385 
1386 	pm_runtime_enable(&pdev->dev);
1387 	if (!pm_runtime_enabled(&pdev->dev)) {
1388 		ret = tegra_spi_runtime_resume(&pdev->dev);
1389 		if (ret)
1390 			goto exit_pm_disable;
1391 	}
1392 
1393 	ret = pm_runtime_get_sync(&pdev->dev);
1394 	if (ret < 0) {
1395 		dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
1396 		pm_runtime_put_noidle(&pdev->dev);
1397 		goto exit_pm_disable;
1398 	}
1399 
1400 	reset_control_assert(tspi->rst);
1401 	udelay(2);
1402 	reset_control_deassert(tspi->rst);
1403 	tspi->def_command1_reg  = SPI_M_S;
1404 	tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
1405 	tspi->spi_cs_timing1 = tegra_spi_readl(tspi, SPI_CS_TIMING1);
1406 	tspi->spi_cs_timing2 = tegra_spi_readl(tspi, SPI_CS_TIMING2);
1407 	tspi->def_command2_reg = tegra_spi_readl(tspi, SPI_COMMAND2);
1408 	tspi->last_used_cs = master->num_chipselect + 1;
1409 	pm_runtime_put(&pdev->dev);
1410 	ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
1411 				   tegra_spi_isr_thread, IRQF_ONESHOT,
1412 				   dev_name(&pdev->dev), tspi);
1413 	if (ret < 0) {
1414 		dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
1415 			tspi->irq);
1416 		goto exit_pm_disable;
1417 	}
1418 
1419 	master->dev.of_node = pdev->dev.of_node;
1420 	ret = devm_spi_register_master(&pdev->dev, master);
1421 	if (ret < 0) {
1422 		dev_err(&pdev->dev, "can not register to master err %d\n", ret);
1423 		goto exit_free_irq;
1424 	}
1425 	return ret;
1426 
1427 exit_free_irq:
1428 	free_irq(spi_irq, tspi);
1429 exit_pm_disable:
1430 	pm_runtime_disable(&pdev->dev);
1431 	if (!pm_runtime_status_suspended(&pdev->dev))
1432 		tegra_spi_runtime_suspend(&pdev->dev);
1433 	tegra_spi_deinit_dma_param(tspi, false);
1434 exit_rx_dma_free:
1435 	tegra_spi_deinit_dma_param(tspi, true);
1436 exit_free_master:
1437 	spi_master_put(master);
1438 	return ret;
1439 }
1440 
1441 static int tegra_spi_remove(struct platform_device *pdev)
1442 {
1443 	struct spi_master *master = platform_get_drvdata(pdev);
1444 	struct tegra_spi_data	*tspi = spi_master_get_devdata(master);
1445 
1446 	free_irq(tspi->irq, tspi);
1447 
1448 	if (tspi->tx_dma_chan)
1449 		tegra_spi_deinit_dma_param(tspi, false);
1450 
1451 	if (tspi->rx_dma_chan)
1452 		tegra_spi_deinit_dma_param(tspi, true);
1453 
1454 	pm_runtime_disable(&pdev->dev);
1455 	if (!pm_runtime_status_suspended(&pdev->dev))
1456 		tegra_spi_runtime_suspend(&pdev->dev);
1457 
1458 	return 0;
1459 }
1460 
1461 #ifdef CONFIG_PM_SLEEP
1462 static int tegra_spi_suspend(struct device *dev)
1463 {
1464 	struct spi_master *master = dev_get_drvdata(dev);
1465 
1466 	return spi_master_suspend(master);
1467 }
1468 
1469 static int tegra_spi_resume(struct device *dev)
1470 {
1471 	struct spi_master *master = dev_get_drvdata(dev);
1472 	struct tegra_spi_data *tspi = spi_master_get_devdata(master);
1473 	int ret;
1474 
1475 	ret = pm_runtime_get_sync(dev);
1476 	if (ret < 0) {
1477 		pm_runtime_put_noidle(dev);
1478 		dev_err(dev, "pm runtime failed, e = %d\n", ret);
1479 		return ret;
1480 	}
1481 	tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
1482 	tegra_spi_writel(tspi, tspi->def_command2_reg, SPI_COMMAND2);
1483 	tspi->last_used_cs = master->num_chipselect + 1;
1484 	pm_runtime_put(dev);
1485 
1486 	return spi_master_resume(master);
1487 }
1488 #endif
1489 
1490 static int tegra_spi_runtime_suspend(struct device *dev)
1491 {
1492 	struct spi_master *master = dev_get_drvdata(dev);
1493 	struct tegra_spi_data *tspi = spi_master_get_devdata(master);
1494 
1495 	/* Flush all writes that are still in the PPSB queue by reading back */
1496 	tegra_spi_readl(tspi, SPI_COMMAND1);
1497 
1498 	clk_disable_unprepare(tspi->clk);
1499 	return 0;
1500 }
1501 
1502 static int tegra_spi_runtime_resume(struct device *dev)
1503 {
1504 	struct spi_master *master = dev_get_drvdata(dev);
1505 	struct tegra_spi_data *tspi = spi_master_get_devdata(master);
1506 	int ret;
1507 
1508 	ret = clk_prepare_enable(tspi->clk);
1509 	if (ret < 0) {
1510 		dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
1511 		return ret;
1512 	}
1513 	return 0;
1514 }
1515 
1516 static const struct dev_pm_ops tegra_spi_pm_ops = {
1517 	SET_RUNTIME_PM_OPS(tegra_spi_runtime_suspend,
1518 		tegra_spi_runtime_resume, NULL)
1519 	SET_SYSTEM_SLEEP_PM_OPS(tegra_spi_suspend, tegra_spi_resume)
1520 };
1521 static struct platform_driver tegra_spi_driver = {
1522 	.driver = {
1523 		.name		= "spi-tegra114",
1524 		.pm		= &tegra_spi_pm_ops,
1525 		.of_match_table	= tegra_spi_of_match,
1526 	},
1527 	.probe =	tegra_spi_probe,
1528 	.remove =	tegra_spi_remove,
1529 };
1530 module_platform_driver(tegra_spi_driver);
1531 
1532 MODULE_ALIAS("platform:spi-tegra114");
1533 MODULE_DESCRIPTION("NVIDIA Tegra114 SPI Controller Driver");
1534 MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
1535 MODULE_LICENSE("GPL v2");
1536