xref: /openbmc/linux/drivers/spi/spi-qup.c (revision 94cdda6b)
/*
 * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

#define QUP_CONFIG			0x0000
#define QUP_STATE			0x0004
#define QUP_IO_M_MODES			0x0008
#define QUP_SW_RESET			0x000c
#define QUP_OPERATIONAL			0x0018
#define QUP_ERROR_FLAGS			0x001c
#define QUP_ERROR_FLAGS_EN		0x0020
#define QUP_OPERATIONAL_MASK		0x0028
#define QUP_HW_VERSION			0x0030
#define QUP_MX_OUTPUT_CNT		0x0100
#define QUP_OUTPUT_FIFO			0x0110
#define QUP_MX_WRITE_CNT		0x0150
#define QUP_MX_INPUT_CNT		0x0200
#define QUP_MX_READ_CNT			0x0208
#define QUP_INPUT_FIFO			0x0218

#define SPI_CONFIG			0x0300
#define SPI_IO_CONTROL			0x0304
#define SPI_ERROR_FLAGS			0x0308
#define SPI_ERROR_FLAGS_EN		0x030c

/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE		(1 << 8)
#define QUP_CONFIG_CLOCK_AUTO_GATE	BIT(13)
#define QUP_CONFIG_NO_INPUT		BIT(7)
#define QUP_CONFIG_NO_OUTPUT		BIT(6)
#define QUP_CONFIG_N			0x001f

/* QUP_STATE fields */
#define QUP_STATE_VALID			BIT(2)
#define QUP_STATE_RESET			0
#define QUP_STATE_RUN			1
#define QUP_STATE_PAUSE			3
#define QUP_STATE_MASK			3
#define QUP_STATE_CLEAR			2

#define QUP_HW_VERSION_2_1_1		0x20010001

/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN		BIT(15)
#define QUP_IO_M_UNPACK_EN		BIT(14)
#define QUP_IO_M_INPUT_MODE_MASK_SHIFT	12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT	10
#define QUP_IO_M_INPUT_MODE_MASK	(3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK	(3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)

#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x)	(((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x)	(((x) & (0x07 << 7)) >> 7)

#define QUP_IO_M_MODE_FIFO		0
#define QUP_IO_M_MODE_BLOCK		1
#define QUP_IO_M_MODE_DMOV		2
#define QUP_IO_M_MODE_BAM		3

/* QUP_OPERATIONAL fields */
#define QUP_OP_MAX_INPUT_DONE_FLAG	BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG	BIT(10)
#define QUP_OP_IN_SERVICE_FLAG		BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG		BIT(8)
#define QUP_OP_IN_FIFO_FULL		BIT(7)
#define QUP_OP_OUT_FIFO_FULL		BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY	BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY	BIT(4)

/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN	BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN	BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN	BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN	BIT(2)

/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE		BIT(10)
#define SPI_CONFIG_INPUT_FIRST		BIT(9)
#define SPI_CONFIG_LOOPBACK		BIT(8)

/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS		BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH		BIT(10)
#define SPI_IO_C_MX_CS_MODE		BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0	BIT(4)
#define SPI_IO_C_CS_SELECT(x)		(((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK		0x000c
#define SPI_IO_C_TRISTATE_CS		BIT(1)
#define SPI_IO_C_NO_TRI_STATE		BIT(0)

/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN		BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN		BIT(0)

#define SPI_NUM_CHIPSELECTS		4

#define SPI_MAX_DMA_XFER		(SZ_64K - 64)

/* high speed mode is when the bus rate is greater than 26 MHz */
#define SPI_HS_MIN_RATE			26000000
#define SPI_MAX_RATE			50000000

#define SPI_DELAY_THRESHOLD		1
#define SPI_DELAY_RETRY			10

struct spi_qup {
	void __iomem		*base;
	struct device		*dev;
	struct clk		*cclk;	/* core clock */
	struct clk		*iclk;	/* interface clock */
	int			irq;
	spinlock_t		lock;

	int			in_fifo_sz;
	int			out_fifo_sz;
	int			in_blk_sz;
	int			out_blk_sz;

	struct spi_transfer	*xfer;
	struct completion	done;
	int			error;
	int			w_size;	/* bytes per SPI word */
	int			n_words;
	int			tx_bytes;
	int			rx_bytes;
	int			qup_v1;

	int			use_dma;
	struct dma_slave_config	rx_conf;
	struct dma_slave_config	tx_conf;
};

static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
{
	u32 opstate = readl_relaxed(controller->base + QUP_STATE);

	return opstate & QUP_STATE_VALID;
}

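/*
 * Move the QUP state machine between RESET, RUN and PAUSE.  A write to
 * QUP_STATE only takes effect once the hardware reports a valid state
 * via QUP_STATE_VALID, so poll for validity both before and after the
 * transition, giving up after SPI_DELAY_RETRY attempts.
 */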
static int spi_qup_set_state(struct spi_qup *controller, u32 state)
{
	unsigned long loop;
	u32 cur_state;

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {

		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	if (loop)
		dev_dbg(controller->dev, "invalid state for %ld us, state %d\n",
			loop, state);
	cur_state = readl_relaxed(controller->base + QUP_STATE);
	/*
	 * Per spec: for PAUSE_STATE to RESET_STATE, two writes
	 * of (b10) are required
	 */
	if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
	    (state == QUP_STATE_RESET)) {
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
	} else {
		cur_state &= ~QUP_STATE_MASK;
		cur_state |= state;
		writel_relaxed(cur_state, controller->base + QUP_STATE);
	}

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {

		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	return 0;
}

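/*
 * Drain the input FIFO into the rx buffer, one 32-bit word at a time,
 * until the transfer is complete or the FIFO runs empty.  Each word is
 * unpacked into w_size bytes; with a NULL rx_buf the FIFO is still
 * drained so the byte count stays in sync.
 */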
static void spi_qup_fifo_read(struct spi_qup *controller,
			    struct spi_transfer *xfer)
{
	u8 *rx_buf = xfer->rx_buf;
	u32 word, state;
	int idx, shift, w_size;

	w_size = controller->w_size;

	while (controller->rx_bytes < xfer->len) {

		state = readl_relaxed(controller->base + QUP_OPERATIONAL);
		if (!(state & QUP_OP_IN_FIFO_NOT_EMPTY))
			break;

		word = readl_relaxed(controller->base + QUP_INPUT_FIFO);

		if (!rx_buf) {
			controller->rx_bytes += w_size;
			continue;
		}

		for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
			/*
			 * The data format depends on bytes per SPI word:
			 *  4 bytes: 0x12345678
			 *  2 bytes: 0x00001234
			 *  1 byte : 0x00000012
			 */
			shift = BITS_PER_BYTE;
			shift *= (w_size - idx - 1);
			rx_buf[controller->rx_bytes] = word >> shift;
		}
	}
}

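/*
 * Fill the output FIFO from the tx buffer until the transfer is
 * complete or the FIFO is full.  Up to w_size bytes are packed into
 * each FIFO word, most significant byte first; a NULL tx_buf clocks
 * out zero words instead.
 */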
static void spi_qup_fifo_write(struct spi_qup *controller,
			    struct spi_transfer *xfer)
{
	const u8 *tx_buf = xfer->tx_buf;
	u32 word, state, data;
	int idx, w_size;

	w_size = controller->w_size;

	while (controller->tx_bytes < xfer->len) {

		state = readl_relaxed(controller->base + QUP_OPERATIONAL);
		if (state & QUP_OP_OUT_FIFO_FULL)
			break;

		word = 0;
		for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {

			if (!tx_buf) {
				controller->tx_bytes += w_size;
				break;
			}

			data = tx_buf[controller->tx_bytes];
			word |= data << (BITS_PER_BYTE * (3 - idx));
		}

		writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
	}
}

static void spi_qup_dma_done(void *data)
{
	struct spi_qup *qup = data;

	complete(&qup->done);
}

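/*
 * Prepare and submit a dmaengine slave descriptor for one direction of
 * the transfer, reusing the scatterlist that the SPI core has already
 * mapped.  The optional callback is invoked on descriptor completion.
 */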
static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
			   enum dma_transfer_direction dir,
			   dma_async_tx_callback callback)
{
	struct spi_qup *qup = spi_master_get_devdata(master);
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	unsigned int nents;

	if (dir == DMA_MEM_TO_DEV) {
		chan = master->dma_tx;
		nents = xfer->tx_sg.nents;
		sgl = xfer->tx_sg.sgl;
	} else {
		chan = master->dma_rx;
		nents = xfer->rx_sg.nents;
		sgl = xfer->rx_sg.sgl;
	}

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
	if (!desc)
		return -EINVAL;

	desc->callback = callback;
	desc->callback_param = qup;

	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}

static void spi_qup_dma_terminate(struct spi_master *master,
				  struct spi_transfer *xfer)
{
	if (xfer->tx_buf)
		dmaengine_terminate_all(master->dma_tx);
	if (xfer->rx_buf)
		dmaengine_terminate_all(master->dma_rx);
}

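/*
 * Start the DMA path.  Completion is signalled by the dmaengine
 * callback on exactly one channel: rx when a receive buffer is
 * present, otherwise tx.
 */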
static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer)
{
	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
	int ret;

	if (xfer->rx_buf)
		rx_done = spi_qup_dma_done;
	else if (xfer->tx_buf)
		tx_done = spi_qup_dma_done;

	if (xfer->rx_buf) {
		ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done);
		if (ret)
			return ret;

		dma_async_issue_pending(master->dma_rx);
	}

	if (xfer->tx_buf) {
		ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done);
		if (ret)
			return ret;

		dma_async_issue_pending(master->dma_tx);
	}

	return 0;
}

static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer)
{
	struct spi_qup *qup = spi_master_get_devdata(master);
	int ret;

	ret = spi_qup_set_state(qup, QUP_STATE_RUN);
	if (ret) {
		dev_warn(qup->dev, "cannot set RUN state\n");
		return ret;
	}

	ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
	if (ret) {
		dev_warn(qup->dev, "cannot set PAUSE state\n");
		return ret;
	}

	spi_qup_fifo_write(qup, xfer);

	return 0;
}

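/*
 * Interrupt handler: acknowledge and log the QUP and SPI error flags,
 * service the FIFOs when in PIO mode, and complete the transfer once
 * all bytes have been received or an error has been seen.
 */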
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
	struct spi_qup *controller = dev_id;
	struct spi_transfer *xfer;
	u32 opflags, qup_err, spi_err;
	unsigned long flags;
	int error = 0;

	spin_lock_irqsave(&controller->lock, flags);
	xfer = controller->xfer;
	controller->xfer = NULL;
	spin_unlock_irqrestore(&controller->lock, flags);

	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
	writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);

	if (!xfer) {
		dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
				    qup_err, spi_err, opflags);
		return IRQ_HANDLED;
	}

	if (qup_err) {
		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
			dev_warn(controller->dev, "INPUT_OVER_RUN\n");

		error = -EIO;
	}

	if (spi_err) {
		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
			dev_warn(controller->dev, "CLK_OVER_RUN\n");
		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
			dev_warn(controller->dev, "CLK_UNDER_RUN\n");

		error = -EIO;
	}

	if (!controller->use_dma) {
		if (opflags & QUP_OP_IN_SERVICE_FLAG)
			spi_qup_fifo_read(controller, xfer);

		if (opflags & QUP_OP_OUT_SERVICE_FLAG)
			spi_qup_fifo_write(controller, xfer);
	}

	spin_lock_irqsave(&controller->lock, flags);
	controller->error = error;
	controller->xfer = xfer;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (controller->rx_bytes == xfer->len || error)
		complete(&controller->done);

	return IRQ_HANDLED;
}

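/*
 * Derive the word size and word count for this transfer and pick an
 * I/O mode: FIFO when all words fit into the input FIFO, BLOCK
 * otherwise.  spi_qup_io_config() may still override the result with
 * BAM mode for DMA transfers.
 */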
static u32
spi_qup_get_mode(struct spi_master *master, struct spi_transfer *xfer)
{
	struct spi_qup *qup = spi_master_get_devdata(master);
	u32 mode;

	qup->w_size = 4;

	if (xfer->bits_per_word <= 8)
		qup->w_size = 1;
	else if (xfer->bits_per_word <= 16)
		qup->w_size = 2;

	qup->n_words = xfer->len / qup->w_size;

	if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
		mode = QUP_IO_M_MODE_FIFO;
	else
		mode = QUP_IO_M_MODE_BLOCK;

	return mode;
}

/* set the clock frequency, I/O and transfer modes, and bits per word */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(spi->master);
	u32 config, iomode, mode, control;
	int ret, n_words;

	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
		dev_err(controller->dev, "loopback transfer size %d exceeds FIFO size %d\n",
			xfer->len, controller->in_fifo_sz);
		return -EIO;
	}

	ret = clk_set_rate(controller->cclk, xfer->speed_hz);
	if (ret) {
		dev_err(controller->dev, "failed to set frequency %d\n",
			xfer->speed_hz);
		return -EIO;
	}

	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
		dev_err(controller->dev, "cannot set RESET state\n");
		return -EIO;
	}

	mode = spi_qup_get_mode(spi->master, xfer);
	n_words = controller->n_words;

	if (mode == QUP_IO_M_MODE_FIFO) {
		writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
		/* must be zero for FIFO */
		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
	} else if (!controller->use_dma) {
		writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
	} else {
		mode = QUP_IO_M_MODE_BAM;
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);

		if (!controller->qup_v1) {
			void __iomem *input_cnt;

			input_cnt = controller->base + QUP_MX_INPUT_CNT;
			/*
			 * for DMA transfers, both QUP_MX_INPUT_CNT and
			 * QUP_MX_OUTPUT_CNT must be zero in all cases but one.
			 * That case is a non-balanced transfer when there is
			 * only a rx_buf.
			 */
			if (xfer->tx_buf)
				writel_relaxed(0, input_cnt);
			else
				writel_relaxed(n_words, input_cnt);

			writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
		}
	}

	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
	/* Set input and output transfer mode */
	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);

	if (!controller->use_dma)
		iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
	else
		iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;

	iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
	iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

	control = readl_relaxed(controller->base + SPI_IO_CONTROL);

	if (spi->mode & SPI_CPOL)
		control |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		control &= ~SPI_IO_C_CLK_IDLE_HIGH;

	writel_relaxed(control, controller->base + SPI_IO_CONTROL);

	config = readl_relaxed(controller->base + SPI_CONFIG);

	if (spi->mode & SPI_LOOP)
		config |= SPI_CONFIG_LOOPBACK;
	else
		config &= ~SPI_CONFIG_LOOPBACK;

	if (spi->mode & SPI_CPHA)
		config &= ~SPI_CONFIG_INPUT_FIRST;
	else
		config |= SPI_CONFIG_INPUT_FIRST;

	/*
	 * HS_MODE improves signal stability at high SPI clock rates,
	 * but is invalid in loopback mode.
	 */
	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
		config |= SPI_CONFIG_HS_MODE;
	else
		config &= ~SPI_CONFIG_HS_MODE;

	writel_relaxed(config, controller->base + SPI_CONFIG);

	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
	config |= xfer->bits_per_word - 1;
	config |= QUP_CONFIG_SPI_MODE;

	if (controller->use_dma) {
		if (!xfer->tx_buf)
			config |= QUP_CONFIG_NO_OUTPUT;
		if (!xfer->rx_buf)
			config |= QUP_CONFIG_NO_INPUT;
	}

	writel_relaxed(config, controller->base + QUP_CONFIG);

	/* only write to OPERATIONAL_MASK when register is present */
	if (!controller->qup_v1) {
		u32 mask = 0;

		/*
		 * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
		 * status change in BAM mode
		 */

		if (mode == QUP_IO_M_MODE_BAM)
			mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;

		writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
	}

	return 0;
}

static int spi_qup_transfer_one(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(master);
	unsigned long timeout, flags;
	int ret = -EIO;

	ret = spi_qup_io_config(spi, xfer);
	if (ret)
		return ret;

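	/*
	 * Scale the timeout with the transfer: SPI clock ticks per
	 * millisecond first, then the milliseconds needed to shift
	 * xfer->len * 8 bits, with a generous 100x margin added before
	 * converting to jiffies.
	 */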
	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
	timeout = DIV_ROUND_UP(xfer->len * 8, timeout);
	timeout = 100 * msecs_to_jiffies(timeout);

	reinit_completion(&controller->done);

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer     = xfer;
	controller->error    = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (controller->use_dma)
		ret = spi_qup_do_dma(master, xfer);
	else
		ret = spi_qup_do_pio(master, xfer);

	if (ret)
		goto exit;

	if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
		dev_warn(controller->dev, "cannot set RUN state\n");
		goto exit;
	}

	if (!wait_for_completion_timeout(&controller->done, timeout))
		ret = -ETIMEDOUT;

exit:
	spi_qup_set_state(controller, QUP_STATE_RESET);
	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer = NULL;
	if (!ret)
		ret = controller->error;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (ret && controller->use_dma)
		spi_qup_dma_terminate(master, xfer);

	return ret;
}

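/*
 * DMA is only used when it is safe and worthwhile: both buffers must
 * be cache aligned, the length a whole number of blocks, the channels
 * successfully acquired, and the transfer too large for the FIFO.
 */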
static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct spi_qup *qup = spi_master_get_devdata(master);
	size_t dma_align = dma_get_cache_alignment();
	u32 mode;

	qup->use_dma = 0;

	if (xfer->rx_buf && (xfer->len % qup->in_blk_sz ||
	    IS_ERR_OR_NULL(master->dma_rx) ||
	    !IS_ALIGNED((size_t)xfer->rx_buf, dma_align)))
		return false;

	if (xfer->tx_buf && (xfer->len % qup->out_blk_sz ||
	    IS_ERR_OR_NULL(master->dma_tx) ||
	    !IS_ALIGNED((size_t)xfer->tx_buf, dma_align)))
		return false;

	mode = spi_qup_get_mode(master, xfer);
	if (mode == QUP_IO_M_MODE_FIFO)
		return false;

	qup->use_dma = 1;

	return true;
}

static void spi_qup_release_dma(struct spi_master *master)
{
	if (!IS_ERR_OR_NULL(master->dma_rx))
		dma_release_channel(master->dma_rx);
	if (!IS_ERR_OR_NULL(master->dma_tx))
		dma_release_channel(master->dma_tx);
}

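/*
 * Acquire the rx and tx dmaengine channels and point them at the QUP
 * FIFO registers.  This can return -EPROBE_DEFER when the DMA
 * controller has not probed yet, which the caller propagates.
 */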
static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
{
	struct spi_qup *spi = spi_master_get_devdata(master);
	struct dma_slave_config *rx_conf = &spi->rx_conf,
				*tx_conf = &spi->tx_conf;
	struct device *dev = spi->dev;
	int ret;

	/* allocate dma resources, if available */
	master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(master->dma_rx))
		return PTR_ERR(master->dma_rx);

	master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(master->dma_tx)) {
		ret = PTR_ERR(master->dma_tx);
		goto err_tx;
	}

	/* set DMA parameters */
	rx_conf->direction = DMA_DEV_TO_MEM;
	rx_conf->device_fc = 1;
	rx_conf->src_addr = base + QUP_INPUT_FIFO;
	rx_conf->src_maxburst = spi->in_blk_sz;

	tx_conf->direction = DMA_MEM_TO_DEV;
	tx_conf->device_fc = 1;
	tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
	tx_conf->dst_maxburst = spi->out_blk_sz;

	ret = dmaengine_slave_config(master->dma_rx, rx_conf);
	if (ret) {
		dev_err(dev, "failed to configure RX channel\n");
		goto err;
	}

	ret = dmaengine_slave_config(master->dma_tx, tx_conf);
	if (ret) {
		dev_err(dev, "failed to configure TX channel\n");
		goto err;
	}

	return 0;

err:
	dma_release_channel(master->dma_tx);
err_tx:
	dma_release_channel(master->dma_rx);
	return ret;
}

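/*
 * For illustration only, a sketch of a device tree node this driver
 * could bind against; the unit address, interrupt and clock specifiers
 * below are placeholders that depend on the SoC:
 *
 *	spi@f9924000 {
 *		compatible = "qcom,spi-qup-v2.2.1";
 *		reg = <0xf9924000 0x1000>;
 *		interrupts = <0 96 0>;
 *		clocks = <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>,
 *			 <&gcc GCC_BLSP1_AHB_CLK>;
 *		clock-names = "core", "iface";
 *		spi-max-frequency = <25000000>;
 *		num-cs = <1>;
 *	};
 */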
static int spi_qup_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct clk *iclk, *cclk;
	struct spi_qup *controller;
	struct resource *res;
	struct device *dev;
	void __iomem *base;
	u32 max_freq, iomode, num_cs;
	int ret, irq, size;

	dev = &pdev->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cclk = devm_clk_get(dev, "core");
	if (IS_ERR(cclk))
		return PTR_ERR(cclk);

	iclk = devm_clk_get(dev, "iface");
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	/* This is an optional parameter */
	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
		max_freq = SPI_MAX_RATE;

	if (!max_freq || max_freq > SPI_MAX_RATE) {
		dev_err(dev, "invalid clock frequency %d\n", max_freq);
		return -ENXIO;
	}

	ret = clk_prepare_enable(cclk);
	if (ret) {
		dev_err(dev, "cannot enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		dev_err(dev, "cannot enable iface clock\n");
		return ret;
	}

	master = spi_alloc_master(dev, sizeof(struct spi_qup));
	if (!master) {
		clk_disable_unprepare(cclk);
		clk_disable_unprepare(iclk);
		dev_err(dev, "cannot allocate master\n");
		return -ENOMEM;
	}

	/* use num-cs, unless it is absent or out of range */
	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
	    num_cs > SPI_NUM_CHIPSELECTS)
		master->num_chipselect = SPI_NUM_CHIPSELECTS;
	else
		master->num_chipselect = num_cs;

	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	master->max_speed_hz = max_freq;
	master->transfer_one = spi_qup_transfer_one;
	master->dev.of_node = pdev->dev.of_node;
	master->auto_runtime_pm = true;
	master->dma_alignment = dma_get_cache_alignment();
	master->max_dma_len = SPI_MAX_DMA_XFER;

	platform_set_drvdata(pdev, master);

	controller = spi_master_get_devdata(master);

	controller->dev = dev;
	controller->base = base;
	controller->iclk = iclk;
	controller->cclk = cclk;
	controller->irq = irq;

	ret = spi_qup_init_dma(master, res->start);
	if (ret == -EPROBE_DEFER)
		goto error;
	else if (!ret)
		master->can_dma = spi_qup_can_dma;

	/* set v1 flag if device is version 1 */
	if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1"))
		controller->qup_v1 = 1;

	spin_lock_init(&controller->lock);
	init_completion(&controller->done);

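	/*
	 * Block and FIFO geometry are encoded in QUP_IO_M_MODES: the
	 * block size in 16-byte units (a value of 0 meaning 4 bytes),
	 * and the FIFO size as a power-of-two multiple, (2 << size),
	 * of the block size.
	 */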
	iomode = readl_relaxed(base + QUP_IO_M_MODES);

	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->out_blk_sz = size * 16;
	else
		controller->out_blk_sz = 4;

	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->in_blk_sz = size * 16;
	else
		controller->in_blk_sz = 4;

	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
		 controller->in_blk_sz, controller->in_fifo_sz,
		 controller->out_blk_sz, controller->out_fifo_sz);

	writel_relaxed(1, base + QUP_SW_RESET);

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret) {
		dev_err(dev, "cannot set RESET state\n");
		goto error_dma;
	}

	writel_relaxed(0, base + QUP_OPERATIONAL);
	writel_relaxed(0, base + QUP_IO_M_MODES);

	if (!controller->qup_v1)
		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);

	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
		       base + SPI_ERROR_FLAGS_EN);

	/* on QUP v1, enable the error flags, leaving INPUT_OVER_RUN disabled */
	if (controller->qup_v1)
		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
			QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
			base + QUP_ERROR_FLAGS_EN);

	writel_relaxed(0, base + SPI_CONFIG);
	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
			       IRQF_TRIGGER_HIGH, pdev->name, controller);
	if (ret)
		goto error_dma;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(&pdev->dev);
error_dma:
	spi_qup_release_dma(master);
error:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
	spi_master_put(master);
	return ret;
}

#ifdef CONFIG_PM
static int spi_qup_pm_suspend_runtime(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	u32 config;

	/* Enable clock auto-gating */
	config = readl(controller->base + QUP_CONFIG);
	config |= QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);
	return 0;
}

static int spi_qup_pm_resume_runtime(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	u32 config;

	/* Disable clock auto-gating */
	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);
	return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);
	return 0;
}

static int spi_qup_resume(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(controller->iclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(controller->cclk);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static int spi_qup_remove(struct platform_device *pdev)
{
	struct spi_master *master = dev_get_drvdata(&pdev->dev);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	spi_qup_release_dma(master);

	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static const struct of_device_id spi_qup_dt_match[] = {
	{ .compatible = "qcom,spi-qup-v1.1.1", },
	{ .compatible = "qcom,spi-qup-v2.1.1", },
	{ .compatible = "qcom,spi-qup-v2.2.1", },
	{ }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);

static const struct dev_pm_ops spi_qup_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
			   spi_qup_pm_resume_runtime,
			   NULL)
};

static struct platform_driver spi_qup_driver = {
	.driver = {
		.name		= "spi_qup",
		.pm		= &spi_qup_dev_pm_ops,
		.of_match_table = spi_qup_dt_match,
	},
	.probe = spi_qup_probe,
	.remove = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");