/*
 * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

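/* QUP core register offsets */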
#define QUP_CONFIG			0x0000
#define QUP_STATE			0x0004
#define QUP_IO_M_MODES			0x0008
#define QUP_SW_RESET			0x000c
#define QUP_OPERATIONAL			0x0018
#define QUP_ERROR_FLAGS			0x001c
#define QUP_ERROR_FLAGS_EN		0x0020
#define QUP_OPERATIONAL_MASK		0x0028
#define QUP_HW_VERSION			0x0030
#define QUP_MX_OUTPUT_CNT		0x0100
#define QUP_OUTPUT_FIFO			0x0110
#define QUP_MX_WRITE_CNT		0x0150
#define QUP_MX_INPUT_CNT		0x0200
#define QUP_MX_READ_CNT			0x0208
#define QUP_INPUT_FIFO			0x0218

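/* SPI mini-core register offsets */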
#define SPI_CONFIG			0x0300
#define SPI_IO_CONTROL			0x0304
#define SPI_ERROR_FLAGS			0x0308
#define SPI_ERROR_FLAGS_EN		0x030c

/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE		BIT(8)
#define QUP_CONFIG_CLOCK_AUTO_GATE	BIT(13)
#define QUP_CONFIG_NO_INPUT		BIT(7)
#define QUP_CONFIG_NO_OUTPUT		BIT(6)
#define QUP_CONFIG_N			0x001f

/* QUP_STATE fields */
#define QUP_STATE_VALID			BIT(2)
#define QUP_STATE_RESET			0
#define QUP_STATE_RUN			1
#define QUP_STATE_PAUSE			3
#define QUP_STATE_MASK			3
#define QUP_STATE_CLEAR			2

#define QUP_HW_VERSION_2_1_1		0x20010001

/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN		BIT(15)
#define QUP_IO_M_UNPACK_EN		BIT(14)
#define QUP_IO_M_INPUT_MODE_MASK_SHIFT	12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT	10
#define QUP_IO_M_INPUT_MODE_MASK	(3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK	(3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)

#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x)	(((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x)	(((x) & (0x07 << 7)) >> 7)

#define QUP_IO_M_MODE_FIFO		0
#define QUP_IO_M_MODE_BLOCK		1
#define QUP_IO_M_MODE_DMOV		2
#define QUP_IO_M_MODE_BAM		3

/* QUP_OPERATIONAL fields */
#define QUP_OP_IN_BLOCK_READ_REQ	BIT(13)
#define QUP_OP_OUT_BLOCK_WRITE_REQ	BIT(12)
#define QUP_OP_MAX_INPUT_DONE_FLAG	BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG	BIT(10)
#define QUP_OP_IN_SERVICE_FLAG		BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG		BIT(8)
#define QUP_OP_IN_FIFO_FULL		BIT(7)
#define QUP_OP_OUT_FIFO_FULL		BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY	BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY	BIT(4)

/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN	BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN	BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN	BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN	BIT(2)

/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE		BIT(10)
#define SPI_CONFIG_INPUT_FIRST		BIT(9)
#define SPI_CONFIG_LOOPBACK		BIT(8)

/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS		BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH		BIT(10)
#define SPI_IO_C_MX_CS_MODE		BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0	BIT(4)
#define SPI_IO_C_CS_SELECT(x)		(((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK		0x000c
#define SPI_IO_C_TRISTATE_CS		BIT(1)
#define SPI_IO_C_NO_TRI_STATE		BIT(0)

/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN		BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN		BIT(0)

#define SPI_NUM_CHIPSELECTS		4

#define SPI_MAX_XFER			(SZ_64K - 64)

/* high speed mode is when the bus rate is greater than 26 MHz */
#define SPI_HS_MIN_RATE			26000000
#define SPI_MAX_RATE			50000000

#define SPI_DELAY_THRESHOLD		1
#define SPI_DELAY_RETRY			10

struct spi_qup {
	void __iomem		*base;
	struct device		*dev;
	struct clk		*cclk;	/* core clock */
	struct clk		*iclk;	/* interface clock */
	int			irq;
	spinlock_t		lock;

	int			in_fifo_sz;
	int			out_fifo_sz;
	int			in_blk_sz;
	int			out_blk_sz;

	struct spi_transfer	*xfer;
	struct completion	done;
	int			error;
	int			w_size;	/* bytes per SPI word */
	int			n_words;
	int			tx_bytes;
	int			rx_bytes;
	const u8		*tx_buf;
	u8			*rx_buf;
	int			qup_v1;

	int			mode;
	struct dma_slave_config	rx_conf;
	struct dma_slave_config	tx_conf;
};

static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer);

static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
{
	u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);

	return (opflag & flag) != 0;
}

static inline bool spi_qup_is_dma_xfer(int mode)
{
	return mode == QUP_IO_M_MODE_DMOV || mode == QUP_IO_M_MODE_BAM;
}

/* gets the transfer length in bytes */
static inline unsigned int spi_qup_len(struct spi_qup *controller)
{
	return controller->n_words * controller->w_size;
}

static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
{
	u32 opstate = readl_relaxed(controller->base + QUP_STATE);

	return opstate & QUP_STATE_VALID;
}

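/*
 * Move the QUP state machine to @state: wait for the current state to
 * become valid, request the transition, then wait for the new state to
 * settle.
 */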
static int spi_qup_set_state(struct spi_qup *controller, u32 state)
{
	unsigned long loop;
	u32 cur_state;

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {

		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	if (loop)
		dev_dbg(controller->dev, "invalid state for %ld us, setting state %d\n",
			loop, state);

	cur_state = readl_relaxed(controller->base + QUP_STATE);
	/*
	 * Per spec: for PAUSE_STATE to RESET_STATE, two writes
	 * of (b10) are required
	 */
	if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
	    (state == QUP_STATE_RESET)) {
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
	} else {
		cur_state &= ~QUP_STATE_MASK;
		cur_state |= state;
		writel_relaxed(cur_state, controller->base + QUP_STATE);
	}

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {

		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	return 0;
}

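/*
 * Unpack @num_words words from the input FIFO into the rx buffer, most
 * significant byte of each word first.  With no rx buffer the words are
 * drained and only the byte counter advances.
 */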
static void spi_qup_read_from_fifo(struct spi_qup *controller, u32 num_words)
{
	u8 *rx_buf = controller->rx_buf;
	int i, shift, num_bytes;
	u32 word;

	for (; num_words; num_words--) {

		word = readl_relaxed(controller->base + QUP_INPUT_FIFO);

		num_bytes = min_t(int, spi_qup_len(controller) -
				       controller->rx_bytes,
				       controller->w_size);

		if (!rx_buf) {
			controller->rx_bytes += num_bytes;
			continue;
		}

		for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
			/*
			 * The data format depends on bytes per SPI word:
			 *  4 bytes: 0x12345678
			 *  2 bytes: 0x00001234
			 *  1 byte : 0x00000012
			 */
			shift = BITS_PER_BYTE;
			shift *= (controller->w_size - i - 1);
			rx_buf[controller->rx_bytes] = word >> shift;
		}
	}
}

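/*
 * Drain the input FIFO in response to an input service interrupt: one
 * block per iteration in block mode, or single words while the FIFO
 * reports data in FIFO mode.
 */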
static void spi_qup_read(struct spi_qup *controller, u32 *opflags)
{
	u32 remainder, words_per_block, num_words;
	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;

	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->rx_bytes,
				 controller->w_size);
	words_per_block = controller->in_blk_sz >> 2;

	do {
		/* ACK by clearing service flag */
		writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);

		if (is_block_mode) {
			num_words = (remainder > words_per_block) ?
					words_per_block : remainder;
		} else {
			if (!spi_qup_is_flag_set(controller,
						 QUP_OP_IN_FIFO_NOT_EMPTY))
				break;

			num_words = 1;
		}

		/* read up to the maximum transfer size available */
		spi_qup_read_from_fifo(controller, num_words);

		remainder -= num_words;

		/* if block mode, check to see if next block is available */
		if (is_block_mode && !spi_qup_is_flag_set(controller,
					QUP_OP_IN_BLOCK_READ_REQ))
			break;

	} while (remainder);

	/*
	 * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
	 * reads, it has to be cleared again at the very end.  However, be sure
	 * to refresh the opflags value because MAX_INPUT_DONE_FLAG may now be
	 * present, and that is used to determine if the transaction is
	 * complete.
	 */
	*opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
	if (is_block_mode && *opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
		writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);
}

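/*
 * Pack bytes from the tx buffer into @num_words FIFO words, filling
 * each 32-bit word from its most significant byte downwards, and push
 * them into the output FIFO.  With no tx buffer, zero words are written
 * just to clock the transfer.
 */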
static void spi_qup_write_to_fifo(struct spi_qup *controller, u32 num_words)
{
	const u8 *tx_buf = controller->tx_buf;
	int i, num_bytes;
	u32 word, data;

	for (; num_words; num_words--) {
		word = 0;

		num_bytes = min_t(int, spi_qup_len(controller) -
				       controller->tx_bytes,
				       controller->w_size);
		if (tx_buf)
			for (i = 0; i < num_bytes; i++) {
				data = tx_buf[controller->tx_bytes + i];
				word |= data << (BITS_PER_BYTE * (3 - i));
			}

		controller->tx_bytes += num_bytes;

		writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
	}
}

static void spi_qup_dma_done(void *data)
{
	struct spi_qup *qup = data;

	complete(&qup->done);
}

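/*
 * Output-side counterpart of spi_qup_read(): ack the output service
 * flag and refill the output FIFO a block (block mode) or a word (FIFO
 * mode) at a time until the transfer runs out or the FIFO fills.
 */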
static void spi_qup_write(struct spi_qup *controller)
{
	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
	u32 remainder, words_per_block, num_words;

	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->tx_bytes,
				 controller->w_size);
	words_per_block = controller->out_blk_sz >> 2;

	do {
		/* ACK by clearing service flag */
		writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);

		if (is_block_mode) {
			num_words = (remainder > words_per_block) ?
				words_per_block : remainder;
		} else {
			if (spi_qup_is_flag_set(controller,
						QUP_OP_OUT_FIFO_FULL))
				break;

			num_words = 1;
		}

		spi_qup_write_to_fifo(controller, num_words);

		remainder -= num_words;

		/* if block mode, check to see if next block is available */
		if (is_block_mode && !spi_qup_is_flag_set(controller,
					QUP_OP_OUT_BLOCK_WRITE_REQ))
			break;

	} while (remainder);
}

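/*
 * Prepare and submit a slave-sg descriptor covering @nents entries of
 * @sgl on the rx or tx channel; @callback, when set, completes the
 * transfer from the dmaengine completion path.
 */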
static int spi_qup_prep_sg(struct spi_master *master, struct scatterlist *sgl,
			   unsigned int nents, enum dma_transfer_direction dir,
			   dma_async_tx_callback callback)
{
	struct spi_qup *qup = spi_master_get_devdata(master);
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	if (dir == DMA_MEM_TO_DEV)
		chan = master->dma_tx;
	else
		chan = master->dma_rx;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
	if (IS_ERR_OR_NULL(desc))
		return desc ? PTR_ERR(desc) : -EINVAL;

	desc->callback = callback;
	desc->callback_param = qup;

	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}

static void spi_qup_dma_terminate(struct spi_master *master,
				  struct spi_transfer *xfer)
{
	if (xfer->tx_buf)
		dmaengine_terminate_all(master->dma_tx);
	if (xfer->rx_buf)
		dmaengine_terminate_all(master->dma_rx);
}

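/*
 * Count how many leading @sgl entries (*nents) fit within @max bytes
 * and return their combined length; used to split large transfers into
 * chunks the QUP transfer counters can describe.
 */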
static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
				     u32 *nents)
{
	struct scatterlist *sg;
	u32 total = 0;

	for (sg = sgl; sg; sg = sg_next(sg)) {
		unsigned int len = sg_dma_len(sg);

		/* check for overflow as well as limit */
		if (((total + len) < total) || ((total + len) > max))
			break;

		total += len;
		(*nents)++;
	}

	return total;
}

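/*
 * Run a transfer in BAM/DMA mode, splitting the scatterlists into
 * chunks of at most SPI_MAX_XFER bytes and reprogramming the QUP
 * between chunks.
 */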
static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
			  unsigned long timeout)
{
	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
	struct spi_master *master = spi->master;
	struct spi_qup *qup = spi_master_get_devdata(master);
	struct scatterlist *tx_sgl, *rx_sgl;
	int ret;

	if (xfer->rx_buf)
		rx_done = spi_qup_dma_done;
	else if (xfer->tx_buf)
		tx_done = spi_qup_dma_done;

	rx_sgl = xfer->rx_sg.sgl;
	tx_sgl = xfer->tx_sg.sgl;

	do {
		u32 rx_nents = 0, tx_nents = 0;

		if (rx_sgl)
			qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
					SPI_MAX_XFER, &rx_nents) / qup->w_size;
		if (tx_sgl)
			qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
					SPI_MAX_XFER, &tx_nents) / qup->w_size;
		if (!qup->n_words)
			return -EIO;

		ret = spi_qup_io_config(spi, xfer);
		if (ret)
			return ret;

		/* before issuing the descriptors, set the QUP to run */
		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}
		if (rx_sgl) {
			ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
					      DMA_DEV_TO_MEM, rx_done);
			if (ret)
				return ret;
			dma_async_issue_pending(master->dma_rx);
		}

		if (tx_sgl) {
			ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
					      DMA_MEM_TO_DEV, tx_done);
			if (ret)
				return ret;

			dma_async_issue_pending(master->dma_tx);
		}

		if (!wait_for_completion_timeout(&qup->done, timeout))
			return -ETIMEDOUT;

		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
			;
		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
			;

	} while (rx_sgl || tx_sgl);

	return 0;
}

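/*
 * Carry out a transfer in FIFO or block mode, in chunks of at most
 * SPI_MAX_XFER words.  FIFO-mode chunks are pre-filled here while the
 * QUP is paused; the IRQ handler then drains and refills the FIFOs
 * until the chunk completes.
 */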
static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
			  unsigned long timeout)
{
	struct spi_master *master = spi->master;
	struct spi_qup *qup = spi_master_get_devdata(master);
	int ret, n_words, iterations, offset = 0;

	n_words = qup->n_words;
	iterations = n_words / SPI_MAX_XFER; /* round down */
	qup->rx_buf = xfer->rx_buf;
	qup->tx_buf = xfer->tx_buf;

	do {
		if (iterations)
			qup->n_words = SPI_MAX_XFER;
		else
			qup->n_words = n_words % SPI_MAX_XFER;

		if (qup->tx_buf && offset)
			qup->tx_buf = xfer->tx_buf + offset * SPI_MAX_XFER;

		if (qup->rx_buf && offset)
			qup->rx_buf = xfer->rx_buf + offset * SPI_MAX_XFER;

		/*
		 * if the transaction is small enough, we need to
		 * fall back to FIFO mode
		 */
		if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
			qup->mode = QUP_IO_M_MODE_FIFO;

		ret = spi_qup_io_config(spi, xfer);
		if (ret)
			return ret;

		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}

		ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
		if (ret) {
			dev_warn(qup->dev, "cannot set PAUSE state\n");
			return ret;
		}

		if (qup->mode == QUP_IO_M_MODE_FIFO)
			spi_qup_write(qup);

		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}

		if (!wait_for_completion_timeout(&qup->done, timeout))
			return -ETIMEDOUT;

		offset++;
	} while (iterations--);

	return 0;
}

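/*
 * Single handler for QUP and SPI mini-core error flags as well as the
 * input/output service interrupts.  PIO transfers are advanced from
 * here; for DMA transfers the flags are simply acked and the dmaengine
 * callback signals completion instead.
 */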
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
	struct spi_qup *controller = dev_id;
	u32 opflags, qup_err, spi_err;
	int error = 0;

	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);

	if (qup_err) {
		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
			dev_warn(controller->dev, "INPUT_OVER_RUN\n");

		error = -EIO;
	}

	if (spi_err) {
		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
			dev_warn(controller->dev, "CLK_OVER_RUN\n");
		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
			dev_warn(controller->dev, "CLK_UNDER_RUN\n");

		error = -EIO;
	}

	if (spi_qup_is_dma_xfer(controller->mode)) {
		writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
	} else {
		if (opflags & QUP_OP_IN_SERVICE_FLAG)
			spi_qup_read(controller, &opflags);

		if (opflags & QUP_OP_OUT_SERVICE_FLAG)
			spi_qup_write(controller);
	}

	if ((opflags & QUP_OP_MAX_INPUT_DONE_FLAG) || error)
		complete(&controller->done);

	return IRQ_HANDLED;
}

/* set the clock frequency and bits per word, and determine the transfer mode */
static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(spi->master);
	int ret;

	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
		dev_err(controller->dev, "transfer too big for loopback: %d > %d\n",
			xfer->len, controller->in_fifo_sz);
		return -EIO;
	}

	ret = clk_set_rate(controller->cclk, xfer->speed_hz);
	if (ret) {
		dev_err(controller->dev, "failed to set frequency %d\n",
			xfer->speed_hz);
		return -EIO;
	}

	controller->w_size = DIV_ROUND_UP(xfer->bits_per_word, 8);
	controller->n_words = xfer->len / controller->w_size;

	if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32)))
		controller->mode = QUP_IO_M_MODE_FIFO;
	else if (spi->master->can_dma &&
		 spi->master->can_dma(spi->master, spi, xfer) &&
		 spi->master->cur_msg_mapped)
		controller->mode = QUP_IO_M_MODE_BAM;
	else
		controller->mode = QUP_IO_M_MODE_BLOCK;

	return 0;
}

/* prep qup for another spi transaction of specific type */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(spi->master);
	u32 config, iomode, control;
	unsigned long flags;

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer     = xfer;
	controller->error    = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
		dev_err(controller->dev, "cannot set RESET state\n");
		return -EIO;
	}

	switch (controller->mode) {
	case QUP_IO_M_MODE_FIFO:
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_READ_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_WRITE_CNT);
		/* must be zero for FIFO */
		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
		break;
	case QUP_IO_M_MODE_BAM:
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);

		if (!controller->qup_v1) {
			void __iomem *input_cnt;

			input_cnt = controller->base + QUP_MX_INPUT_CNT;
			/*
			 * for DMA transfers, both QUP_MX_INPUT_CNT and
			 * QUP_MX_OUTPUT_CNT must be zero in all cases but one.
			 * That case is a non-balanced transfer when there is
			 * only a rx_buf.
			 */
			if (xfer->tx_buf)
				writel_relaxed(0, input_cnt);
			else
				writel_relaxed(controller->n_words, input_cnt);

			writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
		}
		break;
	case QUP_IO_M_MODE_BLOCK:
		reinit_completion(&controller->done);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
		break;
	default:
		dev_err(controller->dev, "unknown mode = %d\n",
				controller->mode);
		return -EIO;
	}

	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
	/* Set input and output transfer mode */
	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);

	if (!spi_qup_is_dma_xfer(controller->mode))
		iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
	else
		iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;

	iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
	iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

	control = readl_relaxed(controller->base + SPI_IO_CONTROL);

	if (spi->mode & SPI_CPOL)
		control |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		control &= ~SPI_IO_C_CLK_IDLE_HIGH;

	writel_relaxed(control, controller->base + SPI_IO_CONTROL);

	config = readl_relaxed(controller->base + SPI_CONFIG);

	if (spi->mode & SPI_LOOP)
		config |= SPI_CONFIG_LOOPBACK;
	else
		config &= ~SPI_CONFIG_LOOPBACK;

	if (spi->mode & SPI_CPHA)
		config &= ~SPI_CONFIG_INPUT_FIRST;
	else
		config |= SPI_CONFIG_INPUT_FIRST;

	/*
	 * HS_MODE improves signal stability at high SPI clock rates, but is
	 * invalid in loopback mode.
	 */
	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
		config |= SPI_CONFIG_HS_MODE;
	else
		config &= ~SPI_CONFIG_HS_MODE;

	writel_relaxed(config, controller->base + SPI_CONFIG);

	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
	config |= xfer->bits_per_word - 1;
	config |= QUP_CONFIG_SPI_MODE;

	if (spi_qup_is_dma_xfer(controller->mode)) {
		if (!xfer->tx_buf)
			config |= QUP_CONFIG_NO_OUTPUT;
		if (!xfer->rx_buf)
			config |= QUP_CONFIG_NO_INPUT;
	}

	writel_relaxed(config, controller->base + QUP_CONFIG);

	/* only write to OPERATIONAL_MASK when the register is present */
	if (!controller->qup_v1) {
		u32 mask = 0;

		/*
		 * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
		 * status changes in BAM mode
		 */
		if (spi_qup_is_dma_xfer(controller->mode))
			mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;

		writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
	}

	return 0;
}

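/*
 * Execute a single transfer in whatever mode spi_qup_io_prep() picked.
 * The timeout below works out to roughly 100x the nominal time the
 * transfer (capped at SPI_MAX_XFER bytes per chunk) needs on the wire.
 */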
static int spi_qup_transfer_one(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(master);
	unsigned long timeout, flags;
	int ret;

	ret = spi_qup_io_prep(spi, xfer);
	if (ret)
		return ret;

	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
	timeout = DIV_ROUND_UP(min_t(unsigned long, SPI_MAX_XFER,
				     xfer->len) * 8, timeout);
	timeout = 100 * msecs_to_jiffies(timeout);

	reinit_completion(&controller->done);

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer     = xfer;
	controller->error    = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (spi_qup_is_dma_xfer(controller->mode))
		ret = spi_qup_do_dma(spi, xfer, timeout);
	else
		ret = spi_qup_do_pio(spi, xfer, timeout);

	spi_qup_set_state(controller, QUP_STATE_RESET);
	spin_lock_irqsave(&controller->lock, flags);
	if (!ret)
		ret = controller->error;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (ret && spi_qup_is_dma_xfer(controller->mode))
		spi_qup_dma_terminate(master, xfer);

	return ret;
}

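/*
 * DMA is used only when the buffers are suitably aligned, both channels
 * were actually acquired, QUP v1 block-size restrictions are met, and
 * the transfer would not fit entirely in the FIFO anyway.
 */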
static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct spi_qup *qup = spi_master_get_devdata(master);
	size_t dma_align = dma_get_cache_alignment();
	int n_words;

	if (xfer->rx_buf) {
		if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
		    IS_ERR_OR_NULL(master->dma_rx))
			return false;
		if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
			return false;
	}

	if (xfer->tx_buf) {
		if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
		    IS_ERR_OR_NULL(master->dma_tx))
			return false;
		if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
			return false;
	}

	n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
	if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
		return false;

	return true;
}

static void spi_qup_release_dma(struct spi_master *master)
{
	if (!IS_ERR_OR_NULL(master->dma_rx))
		dma_release_channel(master->dma_rx);
	if (!IS_ERR_OR_NULL(master->dma_tx))
		dma_release_channel(master->dma_tx);
}

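/*
 * Acquire the rx/tx DMA channels and point them at the QUP FIFO
 * registers.  May return -EPROBE_DEFER, in which case probing is
 * retried once the DMA controller shows up.
 */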
static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
{
	struct spi_qup *spi = spi_master_get_devdata(master);
	struct dma_slave_config *rx_conf = &spi->rx_conf,
				*tx_conf = &spi->tx_conf;
	struct device *dev = spi->dev;
	int ret;

	/* allocate dma resources, if available */
	master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(master->dma_rx))
		return PTR_ERR(master->dma_rx);

	master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(master->dma_tx)) {
		ret = PTR_ERR(master->dma_tx);
		goto err_tx;
	}

	/* set DMA parameters */
	rx_conf->direction = DMA_DEV_TO_MEM;
	rx_conf->device_fc = 1;
	rx_conf->src_addr = base + QUP_INPUT_FIFO;
	rx_conf->src_maxburst = spi->in_blk_sz;

	tx_conf->direction = DMA_MEM_TO_DEV;
	tx_conf->device_fc = 1;
	tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
	tx_conf->dst_maxburst = spi->out_blk_sz;

	ret = dmaengine_slave_config(master->dma_rx, rx_conf);
	if (ret) {
		dev_err(dev, "failed to configure RX channel\n");
		goto err;
	}

	ret = dmaengine_slave_config(master->dma_tx, tx_conf);
	if (ret) {
		dev_err(dev, "failed to configure TX channel\n");
		goto err;
	}

	return 0;

err:
	dma_release_channel(master->dma_tx);
err_tx:
	dma_release_channel(master->dma_rx);
	return ret;
}

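/*
 * Manual chip-select control, used on QUP v2 only: toggle FORCE_CS
 * rather than relying on the hardware's automatic CS generation.
 */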
static void spi_qup_set_cs(struct spi_device *spi, bool val)
{
	struct spi_qup *controller;
	u32 spi_ioc;
	u32 spi_ioc_orig;

	controller = spi_master_get_devdata(spi->master);
	spi_ioc = readl_relaxed(controller->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (!val)
		spi_ioc |= SPI_IO_C_FORCE_CS;
	else
		spi_ioc &= ~SPI_IO_C_FORCE_CS;

	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, controller->base + SPI_IO_CONTROL);
}

static int spi_qup_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct clk *iclk, *cclk;
	struct spi_qup *controller;
	struct resource *res;
	struct device *dev;
	void __iomem *base;
	u32 max_freq, iomode, num_cs;
	int ret, irq, size;

	dev = &pdev->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cclk = devm_clk_get(dev, "core");
	if (IS_ERR(cclk))
		return PTR_ERR(cclk);

	iclk = devm_clk_get(dev, "iface");
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	/* This is an optional parameter */
	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
		max_freq = SPI_MAX_RATE;

	if (!max_freq || max_freq > SPI_MAX_RATE) {
		dev_err(dev, "invalid clock frequency %d\n", max_freq);
		return -ENXIO;
	}

	ret = clk_prepare_enable(cclk);
	if (ret) {
		dev_err(dev, "cannot enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		dev_err(dev, "cannot enable iface clock\n");
		return ret;
	}

	master = spi_alloc_master(dev, sizeof(struct spi_qup));
	if (!master) {
		clk_disable_unprepare(cclk);
		clk_disable_unprepare(iclk);
		dev_err(dev, "cannot allocate master\n");
		return -ENOMEM;
	}

	/* use num-cs unless it is absent or out of range */
	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
	    num_cs > SPI_NUM_CHIPSELECTS)
		master->num_chipselect = SPI_NUM_CHIPSELECTS;
	else
		master->num_chipselect = num_cs;

	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	master->max_speed_hz = max_freq;
	master->transfer_one = spi_qup_transfer_one;
	master->dev.of_node = pdev->dev.of_node;
	master->auto_runtime_pm = true;
	master->dma_alignment = dma_get_cache_alignment();
	master->max_dma_len = SPI_MAX_XFER;

	platform_set_drvdata(pdev, master);

	controller = spi_master_get_devdata(master);

	controller->dev = dev;
	controller->base = base;
	controller->iclk = iclk;
	controller->cclk = cclk;
	controller->irq = irq;

	ret = spi_qup_init_dma(master, res->start);
	if (ret == -EPROBE_DEFER)
		goto error;
	else if (!ret)
		master->can_dma = spi_qup_can_dma;

	controller->qup_v1 = (uintptr_t)of_device_get_match_data(dev);

	if (!controller->qup_v1)
		master->set_cs = spi_qup_set_cs;

	spin_lock_init(&controller->lock);
	init_completion(&controller->done);

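	/*
	 * The read-only fields of QUP_IO_M_MODES encode the block and FIFO
	 * geometry this instance was synthesized with: block size in 16-byte
	 * units (0 means 4 bytes) and FIFO depth as a power-of-two multiple
	 * of the block size.
	 */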
	iomode = readl_relaxed(base + QUP_IO_M_MODES);

	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->out_blk_sz = size * 16;
	else
		controller->out_blk_sz = 4;

	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->in_blk_sz = size * 16;
	else
		controller->in_blk_sz = 4;

	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
		 controller->in_blk_sz, controller->in_fifo_sz,
		 controller->out_blk_sz, controller->out_fifo_sz);

	writel_relaxed(1, base + QUP_SW_RESET);

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret) {
		dev_err(dev, "cannot set RESET state\n");
		goto error_dma;
	}

	writel_relaxed(0, base + QUP_OPERATIONAL);
	writel_relaxed(0, base + QUP_IO_M_MODES);

	if (!controller->qup_v1)
		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);

	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
		       base + SPI_ERROR_FLAGS_EN);

	/* if earlier version of the QUP, disable INPUT_OVERRUN */
	if (controller->qup_v1)
		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
			QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
			base + QUP_ERROR_FLAGS_EN);

	writel_relaxed(0, base + SPI_CONFIG);
	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
			       IRQF_TRIGGER_HIGH, pdev->name, controller);
	if (ret)
		goto error_dma;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(&pdev->dev);
error_dma:
	spi_qup_release_dma(master);
error:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
	spi_master_put(master);
	return ret;
}

#ifdef CONFIG_PM
static int spi_qup_pm_suspend_runtime(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	u32 config;

	/* Enable clock auto-gating */
	config = readl(controller->base + QUP_CONFIG);
	config |= QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);

	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);

	return 0;
}

static int spi_qup_pm_resume_runtime(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	u32 config;
	int ret;

	ret = clk_prepare_enable(controller->iclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(controller->cclk);
	if (ret)
		return ret;

	/* Disable clock auto-gating */
	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);
	return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(device)) {
		clk_disable_unprepare(controller->cclk);
		clk_disable_unprepare(controller->iclk);
	}
	return 0;
}

static int spi_qup_resume(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(controller->iclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(controller->cclk);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static int spi_qup_remove(struct platform_device *pdev)
{
	struct spi_master *master = dev_get_drvdata(&pdev->dev);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	spi_qup_release_dma(master);

	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static const struct of_device_id spi_qup_dt_match[] = {
	{ .compatible = "qcom,spi-qup-v1.1.1", .data = (void *)1, },
	{ .compatible = "qcom,spi-qup-v2.1.1", },
	{ .compatible = "qcom,spi-qup-v2.2.1", },
	{ }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);

static const struct dev_pm_ops spi_qup_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
			   spi_qup_pm_resume_runtime,
			   NULL)
};

static struct platform_driver spi_qup_driver = {
	.driver = {
		.name		= "spi_qup",
		.pm		= &spi_qup_dev_pm_ops,
		.of_match_table = spi_qup_dt_match,
	},
	.probe = spi_qup_probe,
	.remove = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");