xref: /openbmc/linux/drivers/spi/spi-bcm-qspi.c (revision ee4d62c4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Driver for Broadcom BRCMSTB, NSP,  NS2, Cygnus SPI Controllers
4  *
5  * Copyright 2016 Broadcom
6  */
7 
8 #include <linux/clk.h>
9 #include <linux/delay.h>
10 #include <linux/device.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/ioport.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/of.h>
18 #include <linux/of_irq.h>
19 #include <linux/platform_device.h>
20 #include <linux/slab.h>
21 #include <linux/spi/spi.h>
22 #include <linux/spi/spi-mem.h>
23 #include <linux/sysfs.h>
24 #include <linux/types.h>
25 #include "spi-bcm-qspi.h"
26 
27 #define DRIVER_NAME "bcm_qspi"
28 
29 
30 /* BSPI register offsets */
31 #define BSPI_REVISION_ID			0x000
32 #define BSPI_SCRATCH				0x004
33 #define BSPI_MAST_N_BOOT_CTRL			0x008
34 #define BSPI_BUSY_STATUS			0x00c
35 #define BSPI_INTR_STATUS			0x010
36 #define BSPI_B0_STATUS				0x014
37 #define BSPI_B0_CTRL				0x018
38 #define BSPI_B1_STATUS				0x01c
39 #define BSPI_B1_CTRL				0x020
40 #define BSPI_STRAP_OVERRIDE_CTRL		0x024
41 #define BSPI_FLEX_MODE_ENABLE			0x028
42 #define BSPI_BITS_PER_CYCLE			0x02c
43 #define BSPI_BITS_PER_PHASE			0x030
44 #define BSPI_CMD_AND_MODE_BYTE			0x034
45 #define BSPI_BSPI_FLASH_UPPER_ADDR_BYTE	0x038
46 #define BSPI_BSPI_XOR_VALUE			0x03c
47 #define BSPI_BSPI_XOR_ENABLE			0x040
48 #define BSPI_BSPI_PIO_MODE_ENABLE		0x044
49 #define BSPI_BSPI_PIO_IODIR			0x048
50 #define BSPI_BSPI_PIO_DATA			0x04c
51 
52 /* RAF register offsets */
53 #define BSPI_RAF_START_ADDR			0x100
54 #define BSPI_RAF_NUM_WORDS			0x104
55 #define BSPI_RAF_CTRL				0x108
56 #define BSPI_RAF_FULLNESS			0x10c
57 #define BSPI_RAF_WATERMARK			0x110
58 #define BSPI_RAF_STATUS			0x114
59 #define BSPI_RAF_READ_DATA			0x118
60 #define BSPI_RAF_WORD_CNT			0x11c
61 #define BSPI_RAF_CURR_ADDR			0x120
62 
63 /* Override mode masks */
64 #define BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE	BIT(0)
65 #define BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL	BIT(1)
66 #define BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE	BIT(2)
67 #define BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD	BIT(3)
68 #define BSPI_STRAP_OVERRIDE_CTRL_ENDAIN_MODE	BIT(4)
69 
70 #define BSPI_ADDRLEN_3BYTES			3
71 #define BSPI_ADDRLEN_4BYTES			4
72 
73 #define BSPI_RAF_STATUS_FIFO_EMPTY_MASK	BIT(1)
74 
75 #define BSPI_RAF_CTRL_START_MASK		BIT(0)
76 #define BSPI_RAF_CTRL_CLEAR_MASK		BIT(1)
77 
78 #define BSPI_BPP_MODE_SELECT_MASK		BIT(8)
79 #define BSPI_BPP_ADDR_SELECT_MASK		BIT(16)
80 
81 #define BSPI_READ_LENGTH			256
82 
83 /* MSPI register offsets */
84 #define MSPI_SPCR0_LSB				0x000
85 #define MSPI_SPCR0_MSB				0x004
86 #define MSPI_SPCR0_MSB_CPHA			BIT(0)
87 #define MSPI_SPCR0_MSB_CPOL			BIT(1)
88 #define MSPI_SPCR0_MSB_BITS_SHIFT		0x2
89 #define MSPI_SPCR1_LSB				0x008
90 #define MSPI_SPCR1_MSB				0x00c
91 #define MSPI_NEWQP				0x010
92 #define MSPI_ENDQP				0x014
93 #define MSPI_SPCR2				0x018
94 #define MSPI_MSPI_STATUS			0x020
95 #define MSPI_CPTQP				0x024
96 #define MSPI_SPCR3				0x028
97 #define MSPI_REV				0x02c
98 #define MSPI_TXRAM				0x040
99 #define MSPI_RXRAM				0x0c0
100 #define MSPI_CDRAM				0x140
101 #define MSPI_WRITE_LOCK			0x180
102 
103 #define MSPI_MASTER_BIT			BIT(7)
104 
105 #define MSPI_NUM_CDRAM				16
106 #define MSPI_CDRAM_CONT_BIT			BIT(7)
107 #define MSPI_CDRAM_BITSE_BIT			BIT(6)
108 #define MSPI_CDRAM_DT_BIT			BIT(5)
109 #define MSPI_CDRAM_PCS				0xf
110 
111 #define MSPI_SPCR2_SPE				BIT(6)
112 #define MSPI_SPCR2_CONT_AFTER_CMD		BIT(7)
113 
114 #define MSPI_SPCR3_FASTBR			BIT(0)
115 #define MSPI_SPCR3_FASTDT			BIT(1)
116 #define MSPI_SPCR3_SYSCLKSEL_MASK		GENMASK(11, 10)
117 #define MSPI_SPCR3_SYSCLKSEL_27			(MSPI_SPCR3_SYSCLKSEL_MASK & \
118 						 ~(BIT(10) | BIT(11)))
119 #define MSPI_SPCR3_SYSCLKSEL_108		(MSPI_SPCR3_SYSCLKSEL_MASK & \
120 						 BIT(11))
121 #define MSPI_SPCR3_TXRXDAM_MASK			GENMASK(4, 2)
122 #define MSPI_SPCR3_DAM_8BYTE			0
123 #define MSPI_SPCR3_DAM_16BYTE			(BIT(2) | BIT(4))
124 #define MSPI_SPCR3_DAM_32BYTE			(BIT(3) | BIT(5))
125 #define MSPI_SPCR3_DATA_REG_SZ			BIT(8)
126 #define MSPI_SPCR3_CPHARX			BIT(9)
127 
128 #define MSPI_MSPI_STATUS_SPIF			BIT(0)
129 
130 #define INTR_BASE_BIT_SHIFT			0x02
131 #define INTR_COUNT				0x07
132 
133 #define NUM_CHIPSELECT				4
134 #define QSPI_SPBR_MAX				255U
135 #define MSPI_BASE_FREQ				27000000UL
136 
137 #define OPCODE_DIOR				0xBB
138 #define OPCODE_QIOR				0xEB
139 #define OPCODE_DIOR_4B				0xBC
140 #define OPCODE_QIOR_4B				0xEC
141 
142 #define MAX_CMD_SIZE				6
143 
144 #define ADDR_4MB_MASK				GENMASK(22, 0)
145 
146 /* stop at end of transfer, no other reason */
147 #define TRANS_STATUS_BREAK_NONE		0
148 /* stop at end of spi_message */
149 #define TRANS_STATUS_BREAK_EOM			1
150 /* stop at end of spi_transfer if delay */
151 #define TRANS_STATUS_BREAK_DELAY		2
152 /* stop at end of spi_transfer if cs_change */
153 #define TRANS_STATUS_BREAK_CS_CHANGE		4
154 /* stop if we run out of bytes */
155 #define TRANS_STATUS_BREAK_NO_BYTES		8
156 
157 /* events that make us stop filling TX slots */
158 #define TRANS_STATUS_BREAK_TX (TRANS_STATUS_BREAK_EOM |		\
159 			       TRANS_STATUS_BREAK_DELAY |		\
160 			       TRANS_STATUS_BREAK_CS_CHANGE)
161 
162 /* events that make us deassert CS */
163 #define TRANS_STATUS_BREAK_DESELECT (TRANS_STATUS_BREAK_EOM |		\
164 				     TRANS_STATUS_BREAK_CS_CHANGE)
165 
/*
 * Byte-swap a 32-bit value.  Used for writing and reading data in the
 * right order to TXRAM and RXRAM when they are accessed as 32-bit
 * registers.  Note: evaluates __val multiple times — pass a plain
 * variable, never an expression with side effects (e.g. an MMIO read).
 */
#define swap4bytes(__val) \
	((((__val) >> 24) & 0x000000FF) | (((__val) >>  8) & 0x0000FF00) | \
	 (((__val) <<  8) & 0x00FF0000) | (((__val) << 24) & 0xFF000000))
173 
/* MSPI parameters last programmed into the hardware (see hw_set_parms) */
struct bcm_qspi_parms {
	u32 speed_hz;		/* requested clock; 0 = minimum divider */
	u8 mode;		/* SPI mode bits, only CPOL/CPHA are used */
	u8 bits_per_word;
};
179 
/* Cached BSPI read-mode settings, updated by bcm_qspi_bspi_set_mode() */
struct bcm_xfer_mode {
	bool flex_mode;		/* flex mode vs. strap-override programming */
	unsigned int width;	/* data lane count (1, 2 or 4) */
	unsigned int addrlen;	/* address length in bytes (3 or 4) */
	unsigned int hp;	/* high-performance: addr/mode on multiple lanes */
};
186 
/* Index into bcm_qspi->base[] for each register window */
enum base_type {
	MSPI,
	BSPI,
	CHIP_SELECT,
	BASEMAX,	/* number of register windows */
};
193 
/* How a controller interrupt is routed to the host (single L2 line vs.
 * muxed L1) — consumed via bcm_qspi_irq.irq_source.
 */
enum irq_source {
	SINGLE_L2,
	MUXED_L1,
};
198 
/* Static description of one controller interrupt line */
struct bcm_qspi_irq {
	const char *irq_name;
	const irq_handler_t irq_handler;
	int irq_source;		/* enum irq_source routing */
	u32 mask;		/* status/enable bit(s) for this interrupt */
};
205 
/* Per-IRQ cookie: ties an interrupt description to its owning device
 * (presumably passed as dev_id at request time — registration code is
 * outside this chunk).
 */
struct bcm_qspi_dev_id {
	const struct bcm_qspi_irq *irqp;
	void *dev;
};
210 
211 
/* Progress cursor within the current spi_transfer for MSPI slot filling */
struct qspi_trans {
	struct spi_transfer *trans;	/* current transfer; NULL once consumed */
	int byte;			/* bytes already handled in this transfer */
	bool mspi_last_trans;		/* cleared by spi-mem path while a split
					 * message still has a trailing part */
};
217 
/* Driver state for one QSPI controller instance */
struct bcm_qspi {
	struct platform_device *pdev;
	struct spi_master *master;
	struct clk *clk;
	u32 base_clk;		/* reference clock used for SPBR calculation */
	u32 max_speed_hz;
	void __iomem *base[BASEMAX];	/* register windows, see enum base_type */

	/* Some SoCs provide custom interrupt status register(s) */
	struct bcm_qspi_soc_intc	*soc_intc;

	struct bcm_qspi_parms last_parms;	/* last parameters written to HW */
	struct qspi_trans  trans_pos;		/* progress in current transfer */
	int curr_cs;
	int bspi_maj_rev;	/* BSPI revision; < 4 means the "v3" quirks apply */
	int bspi_min_rev;
	int bspi_enabled;
	const struct spi_mem_op *bspi_rf_op;	/* in-flight BSPI RAF read op */
	u32 bspi_rf_op_idx;	/* 32-bit word index into the rx buffer */
	u32 bspi_rf_op_len;	/* bytes still to drain from the RAF FIFO */
	u32 bspi_rf_op_status;
	struct bcm_xfer_mode xfer_mode;		/* cached BSPI read-mode settings */
	u32 s3_strap_override_ctrl;	/* saved strap override (S3 suspend state?
					 * save/restore code not in this chunk) */
	bool bspi_mode;		/* controller instance has a BSPI block */
	bool big_endian;	/* register access endianness */
	int num_irqs;
	struct bcm_qspi_dev_id *dev_ids;
	struct completion mspi_done;	/* waited on in bcm_qspi_transfer_one() */
	struct completion bspi_done;	/* waited on in bcm_qspi_bspi_exec_mem_op() */
	u8 mspi_maj_rev;
	u8 mspi_min_rev;
	bool mspi_spcr3_sysclk;	/* SoC flag forcing 108 MHz sysclk support */
};
251 
252 static inline bool has_bspi(struct bcm_qspi *qspi)
253 {
254 	return qspi->bspi_mode;
255 }
256 
257 /* hardware supports spcr3 and fast baud-rate  */
258 static inline bool bcm_qspi_has_fastbr(struct bcm_qspi *qspi)
259 {
260 	if (!has_bspi(qspi) &&
261 	    ((qspi->mspi_maj_rev >= 1) &&
262 	     (qspi->mspi_min_rev >= 5)))
263 		return true;
264 
265 	return false;
266 }
267 
268 /* hardware supports sys clk 108Mhz  */
269 static inline bool bcm_qspi_has_sysclk_108(struct bcm_qspi *qspi)
270 {
271 	if (!has_bspi(qspi) && (qspi->mspi_spcr3_sysclk ||
272 	    ((qspi->mspi_maj_rev >= 1) &&
273 	     (qspi->mspi_min_rev >= 6))))
274 		return true;
275 
276 	return false;
277 }
278 
/* Smallest legal baud-rate divider (smaller SPBR = faster SCK) */
static inline int bcm_qspi_spbr_min(struct bcm_qspi *qspi)
{
	if (!bcm_qspi_has_fastbr(qspi))
		return 8;

	/* with the 108 MHz sysclk the floor is 4 instead of 1 */
	return bcm_qspi_has_sysclk_108(qspi) ? 4 : 1;
}
286 
/* Read a controller register at @offset within the @type register window */
static inline u32 bcm_qspi_read(struct bcm_qspi *qspi, enum base_type type,
				unsigned int offset)
{
	return bcm_qspi_readl(qspi->big_endian, qspi->base[type] + offset);
}
293 
/* Write a controller register at @offset within the @type register window */
static inline void bcm_qspi_write(struct bcm_qspi *qspi, enum base_type type,
				  unsigned int offset, unsigned int data)
{
	bcm_qspi_writel(qspi->big_endian, data, qspi->base[type] + offset);
}
300 
301 /* BSPI helpers */
302 static int bcm_qspi_bspi_busy_poll(struct bcm_qspi *qspi)
303 {
304 	int i;
305 
306 	/* this should normally finish within 10us */
307 	for (i = 0; i < 1000; i++) {
308 		if (!(bcm_qspi_read(qspi, BSPI, BSPI_BUSY_STATUS) & 1))
309 			return 0;
310 		udelay(1);
311 	}
312 	dev_warn(&qspi->pdev->dev, "timeout waiting for !busy_status\n");
313 	return -EIO;
314 }
315 
316 static inline bool bcm_qspi_bspi_ver_three(struct bcm_qspi *qspi)
317 {
318 	if (qspi->bspi_maj_rev < 4)
319 		return true;
320 	return false;
321 }
322 
/* Discard stale data in both BSPI prefetch buffers (b0 and b1) */
static void bcm_qspi_bspi_flush_prefetch_buffers(struct bcm_qspi *qspi)
{
	bcm_qspi_bspi_busy_poll(qspi);
	/* Force rising edge for the b0/b1 'flush' field */
	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 1);
	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 1);
	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
}
332 
333 static int bcm_qspi_bspi_lr_is_fifo_empty(struct bcm_qspi *qspi)
334 {
335 	return (bcm_qspi_read(qspi, BSPI, BSPI_RAF_STATUS) &
336 				BSPI_RAF_STATUS_FIFO_EMPTY_MASK);
337 }
338 
339 static inline u32 bcm_qspi_bspi_lr_read_fifo(struct bcm_qspi *qspi)
340 {
341 	u32 data = bcm_qspi_read(qspi, BSPI, BSPI_RAF_READ_DATA);
342 
343 	/* BSPI v3 LR is LE only, convert data to host endianness */
344 	if (bcm_qspi_bspi_ver_three(qspi))
345 		data = le32_to_cpu(data);
346 
347 	return data;
348 }
349 
/* Kick off a RAF (linear read) session once the engine is idle */
static inline void bcm_qspi_bspi_lr_start(struct bcm_qspi *qspi)
{
	bcm_qspi_bspi_busy_poll(qspi);
	bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
		       BSPI_RAF_CTRL_START_MASK);
}
356 
/* Abort/clear the RAF session and drop any prefetched data */
static inline void bcm_qspi_bspi_lr_clear(struct bcm_qspi *qspi)
{
	bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
		       BSPI_RAF_CTRL_CLEAR_MASK);
	bcm_qspi_bspi_flush_prefetch_buffers(qspi);
}
363 
/*
 * Drain the RAF FIFO into the rx buffer of the in-flight spi_mem_op,
 * tracking progress in bspi_rf_op_idx (word index) and bspi_rf_op_len
 * (bytes remaining).
 */
static void bcm_qspi_bspi_lr_data_read(struct bcm_qspi *qspi)
{
	u32 *buf = (u32 *)qspi->bspi_rf_op->data.buf.in;
	u32 data = 0;

	dev_dbg(&qspi->pdev->dev, "xfer %p rx %p rxlen %d\n", qspi->bspi_rf_op,
		qspi->bspi_rf_op->data.buf.in, qspi->bspi_rf_op_len);
	while (!bcm_qspi_bspi_lr_is_fifo_empty(qspi)) {
		data = bcm_qspi_bspi_lr_read_fifo(qspi);
		if (likely(qspi->bspi_rf_op_len >= 4) &&
		    IS_ALIGNED((uintptr_t)buf, 4)) {
			/* fast path: whole words into an aligned buffer */
			buf[qspi->bspi_rf_op_idx++] = data;
			qspi->bspi_rf_op_len -= 4;
		} else {
			/* tail (<4 bytes) or unaligned buffer: copy bytewise */
			u8 *cbuf = (u8 *)&buf[qspi->bspi_rf_op_idx];

			data = cpu_to_le32(data);
			while (qspi->bspi_rf_op_len) {
				*cbuf++ = (u8)data;
				data >>= 8;
				qspi->bspi_rf_op_len--;
			}
		}
	}
}
390 
/*
 * Program the BSPI flex-mode registers: command byte, bits-per-phase
 * (bpp) and bits-per-cycle (bpc).  Flex mode is disabled around the
 * update and re-enabled (or left off) last.
 */
static void bcm_qspi_bspi_set_xfer_params(struct bcm_qspi *qspi, u8 cmd_byte,
					  int bpp, int bpc, int flex_mode)
{
	bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
	bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_CYCLE, bpc);
	bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_PHASE, bpp);
	bcm_qspi_write(qspi, BSPI, BSPI_CMD_AND_MODE_BYTE, cmd_byte);
	bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, flex_mode);
}
400 
401 static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi,
402 				       const struct spi_mem_op *op, int hp)
403 {
404 	int bpc = 0, bpp = 0;
405 	u8 command = op->cmd.opcode;
406 	int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
407 	int addrlen = op->addr.nbytes;
408 	int flex_mode = 1;
409 
410 	dev_dbg(&qspi->pdev->dev, "set flex mode w %x addrlen %x hp %d\n",
411 		width, addrlen, hp);
412 
413 	if (addrlen == BSPI_ADDRLEN_4BYTES)
414 		bpp = BSPI_BPP_ADDR_SELECT_MASK;
415 
416 	bpp |= (op->dummy.nbytes * 8) / op->dummy.buswidth;
417 
418 	switch (width) {
419 	case SPI_NBITS_SINGLE:
420 		if (addrlen == BSPI_ADDRLEN_3BYTES)
421 			/* default mode, does not need flex_cmd */
422 			flex_mode = 0;
423 		break;
424 	case SPI_NBITS_DUAL:
425 		bpc = 0x00000001;
426 		if (hp) {
427 			bpc |= 0x00010100; /* address and mode are 2-bit */
428 			bpp = BSPI_BPP_MODE_SELECT_MASK;
429 		}
430 		break;
431 	case SPI_NBITS_QUAD:
432 		bpc = 0x00000002;
433 		if (hp) {
434 			bpc |= 0x00020200; /* address and mode are 4-bit */
435 			bpp |= BSPI_BPP_MODE_SELECT_MASK;
436 		}
437 		break;
438 	default:
439 		return -EINVAL;
440 	}
441 
442 	bcm_qspi_bspi_set_xfer_params(qspi, command, bpp, bpc, flex_mode);
443 
444 	return 0;
445 }
446 
/*
 * Program the read mode via the strap-override register instead of flex
 * mode (used when the override is already in force — see
 * bcm_qspi_bspi_set_mode()).  Returns -EINVAL for an unsupported width.
 */
static int bcm_qspi_bspi_set_override(struct bcm_qspi *qspi,
				      const struct spi_mem_op *op, int hp)
{
	int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
	int addrlen = op->addr.nbytes;
	u32 data = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);

	dev_dbg(&qspi->pdev->dev, "set override mode w %x addrlen %x hp %d\n",
		width, addrlen, hp);

	switch (width) {
	case SPI_NBITS_SINGLE:
		/* clear quad/dual mode */
		data &= ~(BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD |
			  BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL);
		break;
	case SPI_NBITS_QUAD:
		/* clear dual mode and set quad mode */
		data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
		data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
		break;
	case SPI_NBITS_DUAL:
		/* clear quad mode set dual mode */
		data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
		data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
		break;
	default:
		return -EINVAL;
	}

	if (addrlen == BSPI_ADDRLEN_4BYTES)
		/* set 4byte mode*/
		data |= BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
	else
		/* clear 4 byte mode */
		data &= ~BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;

	/* set the override mode */
	data |=	BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
	bcm_qspi_write(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL, data);
	/* flex mode stays off; only the command byte is programmed */
	bcm_qspi_bspi_set_xfer_params(qspi, op->cmd.opcode, 0, 0, 0);

	return 0;
}
491 
/*
 * Select how BSPI reads are performed for @op: flex mode by default, or
 * the strap-override method when the override bit is already in force
 * (only possible on BSPI v4+).  Records the resulting width/addrlen/hp
 * in qspi->xfer_mode and logs when they change.
 */
static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
				  const struct spi_mem_op *op, int hp)
{
	int error = 0;
	int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
	int addrlen = op->addr.nbytes;

	/* default mode */
	qspi->xfer_mode.flex_mode = true;

	if (!bcm_qspi_bspi_ver_three(qspi)) {
		u32 val, mask;

		val = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
		mask = BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
		/* override already active (live or saved) -> use override path */
		if (val & mask || qspi->s3_strap_override_ctrl & mask) {
			qspi->xfer_mode.flex_mode = false;
			bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
			error = bcm_qspi_bspi_set_override(qspi, op, hp);
		}
	}

	if (qspi->xfer_mode.flex_mode)
		error = bcm_qspi_bspi_set_flex_mode(qspi, op, hp);

	if (error) {
		dev_warn(&qspi->pdev->dev,
			 "INVALID COMBINATION: width=%d addrlen=%d hp=%d\n",
			 width, addrlen, hp);
	} else if (qspi->xfer_mode.width != width ||
		   qspi->xfer_mode.addrlen != addrlen ||
		   qspi->xfer_mode.hp != hp) {
		qspi->xfer_mode.width = width;
		qspi->xfer_mode.addrlen = addrlen;
		qspi->xfer_mode.hp = hp;
		dev_dbg(&qspi->pdev->dev,
			"cs:%d %d-lane output, %d-byte address%s\n",
			qspi->curr_cs,
			qspi->xfer_mode.width,
			qspi->xfer_mode.addrlen,
			qspi->xfer_mode.hp != -1 ? ", hp mode" : "");
	}

	return error;
}
537 
/*
 * Hand the flash bus (back) to the BSPI engine.  MAST_N_BOOT_CTRL reads
 * 0 when BSPI already owns the bus, in which case nothing is done.
 */
static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
{
	if (!has_bspi(qspi))
		return;

	qspi->bspi_enabled = 1;
	if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1) == 0)
		return;

	/* drop stale prefetch data before switching ownership */
	bcm_qspi_bspi_flush_prefetch_buffers(qspi);
	udelay(1);
	bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 0);
	udelay(1);
}
552 
/*
 * Take the flash bus away from BSPI so MSPI can use it.
 * MAST_N_BOOT_CTRL reads 1 when MSPI already owns the bus.
 */
static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
{
	if (!has_bspi(qspi))
		return;

	qspi->bspi_enabled = 0;
	if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1))
		return;

	/* wait for BSPI to go idle before switching ownership */
	bcm_qspi_bspi_busy_poll(qspi);
	bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 1);
	udelay(1);
}
566 
567 static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
568 {
569 	u32 rd = 0;
570 	u32 wr = 0;
571 
572 	if (qspi->base[CHIP_SELECT]) {
573 		rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
574 		wr = (rd & ~0xff) | (1 << cs);
575 		if (rd == wr)
576 			return;
577 		bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
578 		usleep_range(10, 20);
579 	}
580 
581 	dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
582 	qspi->curr_cs = cs;
583 }
584 
/* MSPI helpers */

/*
 * Program the MSPI clocking/framing registers (SPCR0, SPCR3 when
 * available, and the SPBR baud-rate divider) from @xp, then cache @xp
 * in qspi->last_parms.
 */
static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
				  const struct bcm_qspi_parms *xp)
{
	u32 spcr, spbr = 0;

	if (!qspi->mspi_maj_rev)
		/* legacy controller */
		spcr = MSPI_MASTER_BIT;
	else
		spcr = 0;

	/*
	 * Bits per transfer.  BITS determines the number of data bits
	 * transferred if the command control bit (BITSE of a
	 * CDRAM Register) is equal to 1.
	 * If CDRAM BITSE is equal to 0, 8 data bits are transferred
	 * regardless
	 */
	if (xp->bits_per_word != 16 && xp->bits_per_word != 64)
		spcr |= xp->bits_per_word << MSPI_SPCR0_MSB_BITS_SHIFT;

	spcr |= xp->mode & (MSPI_SPCR0_MSB_CPHA | MSPI_SPCR0_MSB_CPOL);
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_MSB, spcr);

	/* SPCR3 only exists on parts with fast baud-rate support */
	if (bcm_qspi_has_fastbr(qspi)) {
		spcr = 0;

		/* enable fastbr */
		spcr |=	MSPI_SPCR3_FASTBR;

		if (bcm_qspi_has_sysclk_108(qspi)) {
			/* SYSCLK_108 */
			spcr |= MSPI_SPCR3_SYSCLKSEL_108;
			qspi->base_clk = MSPI_BASE_FREQ * 4;
		}

		if (xp->bits_per_word > 16) {
			/* data_reg_size 1 (64bit) */
			spcr |=	MSPI_SPCR3_DATA_REG_SZ;
			/* TxRx RAM data access mode 2 for 32B and set fastdt */
			spcr |=	MSPI_SPCR3_DAM_32BYTE  | MSPI_SPCR3_FASTDT;
			/*
			 *  Set length of delay after transfer
			 *  DTL from 0(256) to 1
			 */
			bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 1);
		} else {
			/* data_reg_size[8] = 0 */
			spcr &=	~(MSPI_SPCR3_DATA_REG_SZ);

			/*
			 * TxRx RAM access mode 8B
			 * and disable fastdt
			 */
			spcr &= ~(MSPI_SPCR3_DAM_32BYTE);
		}
		bcm_qspi_write(qspi, MSPI, MSPI_SPCR3, spcr);
	}

	/* divider: base_clk / (2 * speed), clamped to the legal SPBR range */
	if (xp->speed_hz)
		spbr = qspi->base_clk / (2 * xp->speed_hz);

	spbr = clamp_val(spbr, bcm_qspi_spbr_min(qspi), QSPI_SPBR_MAX);
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spbr);

	qspi->last_parms = *xp;
}
653 
654 static void bcm_qspi_update_parms(struct bcm_qspi *qspi,
655 				  struct spi_device *spi,
656 				  struct spi_transfer *trans)
657 {
658 	struct bcm_qspi_parms xp;
659 
660 	xp.speed_hz = trans->speed_hz;
661 	xp.bits_per_word = trans->bits_per_word;
662 	xp.mode = spi->mode;
663 
664 	bcm_qspi_hw_set_parms(qspi, &xp);
665 }
666 
667 static int bcm_qspi_setup(struct spi_device *spi)
668 {
669 	struct bcm_qspi_parms *xp;
670 
671 	if (spi->bits_per_word > 64)
672 		return -EINVAL;
673 
674 	xp = spi_get_ctldata(spi);
675 	if (!xp) {
676 		xp = kzalloc(sizeof(*xp), GFP_KERNEL);
677 		if (!xp)
678 			return -ENOMEM;
679 		spi_set_ctldata(spi, xp);
680 	}
681 	xp->speed_hz = spi->max_speed_hz;
682 	xp->mode = spi->mode;
683 
684 	if (spi->bits_per_word)
685 		xp->bits_per_word = spi->bits_per_word;
686 	else
687 		xp->bits_per_word = 8;
688 
689 	return 0;
690 }
691 
692 static bool bcm_qspi_mspi_transfer_is_last(struct bcm_qspi *qspi,
693 					   struct qspi_trans *qt)
694 {
695 	if (qt->mspi_last_trans &&
696 	    spi_transfer_is_last(qspi->master, qt->trans))
697 		return true;
698 	else
699 		return false;
700 }
701 
/*
 * Account for the bytes consumed by the MSPI slot just queued/read and
 * decide whether the slot loop must stop.  Returns a bitmask of
 * TRANS_STATUS_BREAK_* reasons (filtered by @flags for the delay and
 * cs_change causes); clears qt->trans once the transfer is complete.
 */
static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
					struct qspi_trans *qt, int flags)
{
	int ret = TRANS_STATUS_BREAK_NONE;

	/* count the last transferred bytes: one slot carries 1/2/4/8 bytes */
	if (qt->trans->bits_per_word <= 8)
		qt->byte++;
	else if (qt->trans->bits_per_word <= 16)
		qt->byte += 2;
	else if (qt->trans->bits_per_word <= 32)
		qt->byte += 4;
	else if (qt->trans->bits_per_word <= 64)
		qt->byte += 8;

	if (qt->byte >= qt->trans->len) {
		/* we're at the end of the spi_transfer */
		/* in TX mode, need to pause for a delay or CS change */
		if (qt->trans->delay.value &&
		    (flags & TRANS_STATUS_BREAK_DELAY))
			ret |= TRANS_STATUS_BREAK_DELAY;
		if (qt->trans->cs_change &&
		    (flags & TRANS_STATUS_BREAK_CS_CHANGE))
			ret |= TRANS_STATUS_BREAK_CS_CHANGE;

		if (bcm_qspi_mspi_transfer_is_last(qspi, qt))
			ret |= TRANS_STATUS_BREAK_EOM;
		else
			ret |= TRANS_STATUS_BREAK_NO_BYTES;

		qt->trans = NULL;
	}

	dev_dbg(&qspi->pdev->dev, "trans %p len %d byte %d ret %x\n",
		qt->trans, qt->trans ? qt->trans->len : 0, qt->byte, ret);
	return ret;
}
739 
740 static inline u8 read_rxram_slot_u8(struct bcm_qspi *qspi, int slot)
741 {
742 	u32 slot_offset = MSPI_RXRAM + (slot << 3) + 0x4;
743 
744 	/* mask out reserved bits */
745 	return bcm_qspi_read(qspi, MSPI, slot_offset) & 0xff;
746 }
747 
748 static inline u16 read_rxram_slot_u16(struct bcm_qspi *qspi, int slot)
749 {
750 	u32 reg_offset = MSPI_RXRAM;
751 	u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
752 	u32 msb_offset = reg_offset + (slot << 3);
753 
754 	return (bcm_qspi_read(qspi, MSPI, lsb_offset) & 0xff) |
755 		((bcm_qspi_read(qspi, MSPI, msb_offset) & 0xff) << 8);
756 }
757 
758 static inline u32 read_rxram_slot_u32(struct bcm_qspi *qspi, int slot)
759 {
760 	u32 reg_offset = MSPI_RXRAM;
761 	u32 offset = reg_offset + (slot << 3);
762 	u32 val;
763 
764 	val = bcm_qspi_read(qspi, MSPI, offset);
765 	val = swap4bytes(val);
766 
767 	return val;
768 }
769 
770 static inline u64 read_rxram_slot_u64(struct bcm_qspi *qspi, int slot)
771 {
772 	u32 reg_offset = MSPI_RXRAM;
773 	u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
774 	u32 msb_offset = reg_offset + (slot << 3);
775 	u32 msb, lsb;
776 
777 	msb = bcm_qspi_read(qspi, MSPI, msb_offset);
778 	msb = swap4bytes(msb);
779 	lsb = bcm_qspi_read(qspi, MSPI, lsb_offset);
780 	lsb = swap4bytes(lsb);
781 
782 	return ((u64)msb << 32 | lsb);
783 }
784 
/*
 * Copy received data for @slots MSPI slots out of RXRAM into the current
 * transfer's rx_buf, advancing qspi->trans_pos.  Element size follows
 * bits_per_word (1/2/4/8 bytes per slot).
 */
static void read_from_hw(struct bcm_qspi *qspi, int slots)
{
	struct qspi_trans tp;
	int slot;

	/* MSPI and BSPI do not run concurrently */
	bcm_qspi_disable_bspi(qspi);

	if (slots > MSPI_NUM_CDRAM) {
		/* should never happen */
		dev_err(&qspi->pdev->dev, "%s: too many slots!\n", __func__);
		return;
	}

	/* work on a local copy; commit the new position at the end */
	tp = qspi->trans_pos;

	for (slot = 0; slot < slots; slot++) {
		if (tp.trans->bits_per_word <= 8) {
			u8 *buf = tp.trans->rx_buf;

			if (buf)
				buf[tp.byte] = read_rxram_slot_u8(qspi, slot);
			dev_dbg(&qspi->pdev->dev, "RD %02x\n",
				buf ? buf[tp.byte] : 0x0);
		} else if (tp.trans->bits_per_word <= 16) {
			u16 *buf = tp.trans->rx_buf;

			if (buf)
				buf[tp.byte / 2] = read_rxram_slot_u16(qspi,
								      slot);
			dev_dbg(&qspi->pdev->dev, "RD %04x\n",
				buf ? buf[tp.byte / 2] : 0x0);
		} else if (tp.trans->bits_per_word <= 32) {
			u32 *buf = tp.trans->rx_buf;

			if (buf)
				buf[tp.byte / 4] = read_rxram_slot_u32(qspi,
								      slot);
			dev_dbg(&qspi->pdev->dev, "RD %08x\n",
				buf ? buf[tp.byte / 4] : 0x0);

		} else if (tp.trans->bits_per_word <= 64) {
			u64 *buf = tp.trans->rx_buf;

			if (buf)
				buf[tp.byte / 8] = read_rxram_slot_u64(qspi,
								      slot);
			dev_dbg(&qspi->pdev->dev, "RD %llx\n",
				buf ? buf[tp.byte / 8] : 0x0);


		}

		update_qspi_trans_byte_count(qspi, &tp,
					     TRANS_STATUS_BREAK_NONE);
	}

	qspi->trans_pos = tp;
}
843 
844 static inline void write_txram_slot_u8(struct bcm_qspi *qspi, int slot,
845 				       u8 val)
846 {
847 	u32 reg_offset = MSPI_TXRAM + (slot << 3);
848 
849 	/* mask out reserved bits */
850 	bcm_qspi_write(qspi, MSPI, reg_offset, val);
851 }
852 
853 static inline void write_txram_slot_u16(struct bcm_qspi *qspi, int slot,
854 					u16 val)
855 {
856 	u32 reg_offset = MSPI_TXRAM;
857 	u32 msb_offset = reg_offset + (slot << 3);
858 	u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
859 
860 	bcm_qspi_write(qspi, MSPI, msb_offset, (val >> 8));
861 	bcm_qspi_write(qspi, MSPI, lsb_offset, (val & 0xff));
862 }
863 
864 static inline void write_txram_slot_u32(struct bcm_qspi *qspi, int slot,
865 					u32 val)
866 {
867 	u32 reg_offset = MSPI_TXRAM;
868 	u32 msb_offset = reg_offset + (slot << 3);
869 
870 	bcm_qspi_write(qspi, MSPI, msb_offset, swap4bytes(val));
871 }
872 
873 static inline void write_txram_slot_u64(struct bcm_qspi *qspi, int slot,
874 					u64 val)
875 {
876 	u32 reg_offset = MSPI_TXRAM;
877 	u32 msb_offset = reg_offset + (slot << 3);
878 	u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
879 	u32 msb = upper_32_bits(val);
880 	u32 lsb = lower_32_bits(val);
881 
882 	bcm_qspi_write(qspi, MSPI, msb_offset, swap4bytes(msb));
883 	bcm_qspi_write(qspi, MSPI, lsb_offset, swap4bytes(lsb));
884 }
885 
/* Read the command-RAM entry controlling MSPI slot @slot */
static inline u32 read_cdram_slot(struct bcm_qspi *qspi, int slot)
{
	return bcm_qspi_read(qspi, MSPI, MSPI_CDRAM + (slot << 2));
}
890 
/* Write the command-RAM entry controlling MSPI slot @slot */
static inline void write_cdram_slot(struct bcm_qspi *qspi, int slot, u32 val)
{
	bcm_qspi_write(qspi, MSPI, (MSPI_CDRAM + (slot << 2)), val);
}
895 
/*
 * Fill TXRAM/CDRAM with as much of the current transfer as fits
 * (up to MSPI_NUM_CDRAM slots) and start the MSPI engine.
 * Return number of slots written.
 */
static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
{
	struct qspi_trans tp;
	int slot = 0, tstatus = 0;
	u32 mspi_cdram = 0;

	/* MSPI and BSPI do not run concurrently */
	bcm_qspi_disable_bspi(qspi);
	tp = qspi->trans_pos;
	bcm_qspi_update_parms(qspi, spi, tp.trans);

	/* Run until end of transfer or reached the max data */
	while (!tstatus && slot < MSPI_NUM_CDRAM) {
		/* by default keep CS asserted between slots */
		mspi_cdram = MSPI_CDRAM_CONT_BIT;
		if (tp.trans->bits_per_word <= 8) {
			const u8 *buf = tp.trans->tx_buf;
			u8 val = buf ? buf[tp.byte] : 0x00;

			write_txram_slot_u8(qspi, slot, val);
			dev_dbg(&qspi->pdev->dev, "WR %02x\n", val);
		} else if (tp.trans->bits_per_word <= 16) {
			const u16 *buf = tp.trans->tx_buf;
			u16 val = buf ? buf[tp.byte / 2] : 0x0000;

			write_txram_slot_u16(qspi, slot, val);
			dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
		} else if (tp.trans->bits_per_word <= 32) {
			const u32 *buf = tp.trans->tx_buf;
			u32 val = buf ? buf[tp.byte/4] : 0x0;

			write_txram_slot_u32(qspi, slot, val);
			dev_dbg(&qspi->pdev->dev, "WR %08x\n", val);
		} else if (tp.trans->bits_per_word <= 64) {
			const u64 *buf = tp.trans->tx_buf;
			u64 val = (buf ? buf[tp.byte/8] : 0x0);

			/* use the length of delay from SPCR1_LSB */
			if (bcm_qspi_has_fastbr(qspi))
				mspi_cdram |= MSPI_CDRAM_DT_BIT;

			write_txram_slot_u64(qspi, slot, val);
			dev_dbg(&qspi->pdev->dev, "WR %llx\n", val);
		}

		/* BITSE enables the SPCR0 bit count; 8-bit words don't need it */
		mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
			       MSPI_CDRAM_BITSE_BIT);

		/* with BSPI present only PCS0 is wired; otherwise select @spi */
		if (has_bspi(qspi))
			mspi_cdram &= ~1;
		else
			mspi_cdram |= (~(1 << spi->chip_select) &
				       MSPI_CDRAM_PCS);

		write_cdram_slot(qspi, slot, mspi_cdram);

		tstatus = update_qspi_trans_byte_count(qspi, &tp,
						       TRANS_STATUS_BREAK_TX);
		slot++;
	}

	if (!slot) {
		dev_err(&qspi->pdev->dev, "%s: no data to send?", __func__);
		goto done;
	}

	dev_dbg(&qspi->pdev->dev, "submitting %d slots\n", slot);
	bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
	bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, slot - 1);

	/*
	 *  case 1) EOM =1, cs_change =0: SSb inactive
	 *  case 2) EOM =1, cs_change =1: SSb stay active
	 *  case 3) EOM =0, cs_change =0: SSb stay active
	 *  case 4) EOM =0, cs_change =1: SSb inactive
	 */
	if (((tstatus & TRANS_STATUS_BREAK_DESELECT)
	     == TRANS_STATUS_BREAK_CS_CHANGE) ||
	    ((tstatus & TRANS_STATUS_BREAK_DESELECT)
	     == TRANS_STATUS_BREAK_EOM)) {
		/* drop CONT on the last slot so CS deasserts after it */
		mspi_cdram = read_cdram_slot(qspi, slot - 1) &
			~MSPI_CDRAM_CONT_BIT;
		write_cdram_slot(qspi, slot - 1, mspi_cdram);
	}

	if (has_bspi(qspi))
		bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 1);

	/* Must flush previous writes before starting MSPI operation */
	mb();
	/* Set cont | spe | spifie */
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0xe0);

done:
	return slot;
}
991 
/*
 * Execute a flash read through the BSPI RAF (read-ahead FIFO) engine,
 * splitting it into BSPI_READ_LENGTH-sized chunks.  Each chunk's
 * completion is signalled via qspi->bspi_done.  Returns 0 on success,
 * -ETIMEDOUT on a stuck chunk, or -EIO for 4-byte addressing on BSPI
 * v3 which cannot do it.
 */
static int bcm_qspi_bspi_exec_mem_op(struct spi_device *spi,
				     const struct spi_mem_op *op)
{
	struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
	u32 addr = 0, len, rdlen, len_words, from = 0;
	int ret = 0;
	unsigned long timeo = msecs_to_jiffies(100);
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;

	if (bcm_qspi_bspi_ver_three(qspi))
		if (op->addr.nbytes == BSPI_ADDRLEN_4BYTES)
			return -EIO;

	from = op->addr.val;
	if (!spi->cs_gpiod)
		bcm_qspi_chip_select(qspi, spi->chip_select);
	bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);

	/*
	 * when using flex mode we need to send
	 * the upper address byte to bspi
	 */
	if (!bcm_qspi_bspi_ver_three(qspi)) {
		addr = from & 0xff000000;
		bcm_qspi_write(qspi, BSPI,
			       BSPI_BSPI_FLASH_UPPER_ADDR_BYTE, addr);
	}

	if (!qspi->xfer_mode.flex_mode)
		addr = from;
	else
		addr = from & 0x00ffffff;

	/*
	 * v3 translation within the 16MB window; the 0xc00000 offset
	 * presumably matches the boot-time flash mapping — confirm
	 * against the hardware documentation.
	 */
	if (bcm_qspi_bspi_ver_three(qspi) == true)
		addr = (addr + 0xc00000) & 0xffffff;

	/*
	 * read into the entire buffer by breaking the reads
	 * into RAF buffer read lengths
	 */
	len = op->data.nbytes;
	qspi->bspi_rf_op_idx = 0;

	do {
		if (len > BSPI_READ_LENGTH)
			rdlen = BSPI_READ_LENGTH;
		else
			rdlen = len;

		reinit_completion(&qspi->bspi_done);
		bcm_qspi_enable_bspi(qspi);
		len_words = (rdlen + 3) >> 2;
		qspi->bspi_rf_op = op;
		qspi->bspi_rf_op_status = 0;
		qspi->bspi_rf_op_len = rdlen;
		dev_dbg(&qspi->pdev->dev,
			"bspi xfr addr 0x%x len 0x%x", addr, rdlen);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_START_ADDR, addr);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_NUM_WORDS, len_words);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_WATERMARK, 0);
		if (qspi->soc_intc) {
			/*
			 * clear soc MSPI and BSPI interrupts and enable
			 * BSPI interrupts.
			 */
			soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
			soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE, true);
		}

		/* Must flush previous writes before starting BSPI operation */
		mb();
		bcm_qspi_bspi_lr_start(qspi);
		if (!wait_for_completion_timeout(&qspi->bspi_done, timeo)) {
			dev_err(&qspi->pdev->dev, "timeout waiting for BSPI\n");
			ret = -ETIMEDOUT;
			break;
		}

		/* set msg return length */
		addr += rdlen;
		len -= rdlen;
	} while (len);

	return ret;
}
1077 
1078 static int bcm_qspi_transfer_one(struct spi_master *master,
1079 				 struct spi_device *spi,
1080 				 struct spi_transfer *trans)
1081 {
1082 	struct bcm_qspi *qspi = spi_master_get_devdata(master);
1083 	int slots;
1084 	unsigned long timeo = msecs_to_jiffies(100);
1085 
1086 	if (!spi->cs_gpiod)
1087 		bcm_qspi_chip_select(qspi, spi->chip_select);
1088 	qspi->trans_pos.trans = trans;
1089 	qspi->trans_pos.byte = 0;
1090 
1091 	while (qspi->trans_pos.byte < trans->len) {
1092 		reinit_completion(&qspi->mspi_done);
1093 
1094 		slots = write_to_hw(qspi, spi);
1095 		if (!wait_for_completion_timeout(&qspi->mspi_done, timeo)) {
1096 			dev_err(&qspi->pdev->dev, "timeout waiting for MSPI\n");
1097 			return -ETIMEDOUT;
1098 		}
1099 
1100 		read_from_hw(qspi, slots);
1101 	}
1102 	bcm_qspi_enable_bspi(qspi);
1103 
1104 	return 0;
1105 }
1106 
1107 static int bcm_qspi_mspi_exec_mem_op(struct spi_device *spi,
1108 				     const struct spi_mem_op *op)
1109 {
1110 	struct spi_master *master = spi->master;
1111 	struct bcm_qspi *qspi = spi_master_get_devdata(master);
1112 	struct spi_transfer t[2];
1113 	u8 cmd[6] = { };
1114 	int ret, i;
1115 
1116 	memset(cmd, 0, sizeof(cmd));
1117 	memset(t, 0, sizeof(t));
1118 
1119 	/* tx */
1120 	/* opcode is in cmd[0] */
1121 	cmd[0] = op->cmd.opcode;
1122 	for (i = 0; i < op->addr.nbytes; i++)
1123 		cmd[1 + i] = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
1124 
1125 	t[0].tx_buf = cmd;
1126 	t[0].len = op->addr.nbytes + op->dummy.nbytes + 1;
1127 	t[0].bits_per_word = spi->bits_per_word;
1128 	t[0].tx_nbits = op->cmd.buswidth;
1129 	/* lets mspi know that this is not last transfer */
1130 	qspi->trans_pos.mspi_last_trans = false;
1131 	ret = bcm_qspi_transfer_one(master, spi, &t[0]);
1132 
1133 	/* rx */
1134 	qspi->trans_pos.mspi_last_trans = true;
1135 	if (!ret) {
1136 		/* rx */
1137 		t[1].rx_buf = op->data.buf.in;
1138 		t[1].len = op->data.nbytes;
1139 		t[1].rx_nbits =  op->data.buswidth;
1140 		t[1].bits_per_word = spi->bits_per_word;
1141 		ret = bcm_qspi_transfer_one(master, spi, &t[1]);
1142 	}
1143 
1144 	return ret;
1145 }
1146 
1147 static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
1148 				const struct spi_mem_op *op)
1149 {
1150 	struct spi_device *spi = mem->spi;
1151 	struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
1152 	int ret = 0;
1153 	bool mspi_read = false;
1154 	u32 addr = 0, len;
1155 	u_char *buf;
1156 
1157 	if (!op->data.nbytes || !op->addr.nbytes || op->addr.nbytes > 4 ||
1158 	    op->data.dir != SPI_MEM_DATA_IN)
1159 		return -ENOTSUPP;
1160 
1161 	buf = op->data.buf.in;
1162 	addr = op->addr.val;
1163 	len = op->data.nbytes;
1164 
1165 	if (bcm_qspi_bspi_ver_three(qspi) == true) {
1166 		/*
1167 		 * The address coming into this function is a raw flash offset.
1168 		 * But for BSPI <= V3, we need to convert it to a remapped BSPI
1169 		 * address. If it crosses a 4MB boundary, just revert back to
1170 		 * using MSPI.
1171 		 */
1172 		addr = (addr + 0xc00000) & 0xffffff;
1173 
1174 		if ((~ADDR_4MB_MASK & addr) ^
1175 		    (~ADDR_4MB_MASK & (addr + len - 1)))
1176 			mspi_read = true;
1177 	}
1178 
1179 	/* non-aligned and very short transfers are handled by MSPI */
1180 	if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
1181 	    len < 4)
1182 		mspi_read = true;
1183 
1184 	if (mspi_read)
1185 		return bcm_qspi_mspi_exec_mem_op(spi, op);
1186 
1187 	ret = bcm_qspi_bspi_set_mode(qspi, op, 0);
1188 
1189 	if (!ret)
1190 		ret = bcm_qspi_bspi_exec_mem_op(spi, op);
1191 
1192 	return ret;
1193 }
1194 
/* Free the per-device controller data attached by setup() */
static void bcm_qspi_cleanup(struct spi_device *spi)
{
	kfree(spi_get_ctldata(spi));
}
1201 
1202 static irqreturn_t bcm_qspi_mspi_l2_isr(int irq, void *dev_id)
1203 {
1204 	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1205 	struct bcm_qspi *qspi = qspi_dev_id->dev;
1206 	u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);
1207 
1208 	if (status & MSPI_MSPI_STATUS_SPIF) {
1209 		struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1210 		/* clear interrupt */
1211 		status &= ~MSPI_MSPI_STATUS_SPIF;
1212 		bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status);
1213 		if (qspi->soc_intc)
1214 			soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_DONE);
1215 		complete(&qspi->mspi_done);
1216 		return IRQ_HANDLED;
1217 	}
1218 
1219 	return IRQ_NONE;
1220 }
1221 
/*
 * L2 handler for BSPI linear-read interrupts (fullness reached and session
 * done share this handler).  Pulls data for the in-flight read op and, once
 * the whole requested length has been consumed, signals bspi_done.
 */
static irqreturn_t bcm_qspi_bspi_lr_l2_isr(int irq, void *dev_id)
{
	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
	struct bcm_qspi *qspi = qspi_dev_id->dev;
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
	/* default: the mask of whichever L2 line fired */
	u32 status = qspi_dev_id->irqp->mask;

	if (qspi->bspi_enabled && qspi->bspi_rf_op) {
		bcm_qspi_bspi_lr_data_read(qspi);
		if (qspi->bspi_rf_op_len == 0) {
			/* read op fully consumed; drop the reference */
			qspi->bspi_rf_op = NULL;
			if (qspi->soc_intc) {
				/* disable soc BSPI interrupt */
				soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE,
							   false);
				/* indicate done */
				status = INTR_BSPI_LR_SESSION_DONE_MASK;
			}

			/* on error abort the session, otherwise flush stale
			 * prefetched data
			 */
			if (qspi->bspi_rf_op_status)
				bcm_qspi_bspi_lr_clear(qspi);
			else
				bcm_qspi_bspi_flush_prefetch_buffers(qspi);
		}

		if (qspi->soc_intc)
			/* clear soc BSPI interrupt */
			soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_DONE);
	}

	/* complete only when the session-done condition holds and all
	 * requested bytes have been read
	 */
	status &= INTR_BSPI_LR_SESSION_DONE_MASK;
	if (qspi->bspi_enabled && status && qspi->bspi_rf_op_len == 0)
		complete(&qspi->bspi_done);

	return IRQ_HANDLED;
}
1258 
1259 static irqreturn_t bcm_qspi_bspi_lr_err_l2_isr(int irq, void *dev_id)
1260 {
1261 	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1262 	struct bcm_qspi *qspi = qspi_dev_id->dev;
1263 	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1264 
1265 	dev_err(&qspi->pdev->dev, "BSPI INT error\n");
1266 	qspi->bspi_rf_op_status = -EIO;
1267 	if (qspi->soc_intc)
1268 		/* clear soc interrupt */
1269 		soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_ERR);
1270 
1271 	complete(&qspi->bspi_done);
1272 	return IRQ_HANDLED;
1273 }
1274 
1275 static irqreturn_t bcm_qspi_l1_isr(int irq, void *dev_id)
1276 {
1277 	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1278 	struct bcm_qspi *qspi = qspi_dev_id->dev;
1279 	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1280 	irqreturn_t ret = IRQ_NONE;
1281 
1282 	if (soc_intc) {
1283 		u32 status = soc_intc->bcm_qspi_get_int_status(soc_intc);
1284 
1285 		if (status & MSPI_DONE)
1286 			ret = bcm_qspi_mspi_l2_isr(irq, dev_id);
1287 		else if (status & BSPI_DONE)
1288 			ret = bcm_qspi_bspi_lr_l2_isr(irq, dev_id);
1289 		else if (status & BSPI_ERR)
1290 			ret = bcm_qspi_bspi_lr_err_l2_isr(irq, dev_id);
1291 	}
1292 
1293 	return ret;
1294 }
1295 
/*
 * Interrupt descriptor table: one entry per dedicated L2 interrupt line,
 * plus a single muxed L1 entry for SoCs that route everything through one
 * interrupt (irq_source == MUXED_L1).
 */
static const struct bcm_qspi_irq qspi_irq_tab[] = {
	{
		.irq_name = "spi_lr_fullness_reached",
		.irq_handler = bcm_qspi_bspi_lr_l2_isr,
		.mask = INTR_BSPI_LR_FULLNESS_REACHED_MASK,
	},
	{
		.irq_name = "spi_lr_session_aborted",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_SESSION_ABORTED_MASK,
	},
	{
		.irq_name = "spi_lr_impatient",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_IMPATIENT_MASK,
	},
	{
		.irq_name = "spi_lr_session_done",
		.irq_handler = bcm_qspi_bspi_lr_l2_isr,
		.mask = INTR_BSPI_LR_SESSION_DONE_MASK,
	},
#ifdef QSPI_INT_DEBUG
	/* this interrupt is for debug purposes only, dont request irq */
	{
		.irq_name = "spi_lr_overread",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_OVERREAD_MASK,
	},
#endif
	{
		.irq_name = "mspi_done",
		.irq_handler = bcm_qspi_mspi_l2_isr,
		.mask = INTR_MSPI_DONE_MASK,
	},
	{
		.irq_name = "mspi_halted",
		.irq_handler = bcm_qspi_mspi_l2_isr,
		.mask = INTR_MSPI_HALTED_MASK,
	},
	{
		/* single muxed L1 interrupt source */
		.irq_name = "spi_l1_intr",
		.irq_handler = bcm_qspi_l1_isr,
		.irq_source = MUXED_L1,
		.mask = QSPI_INTERRUPTS_ALL,
	},
};
1343 
/*
 * Read the BSPI revision and bring the BSPI block into a quiescent,
 * disabled state so MSPI can be used until a read op enables BSPI.
 */
static void bcm_qspi_bspi_init(struct bcm_qspi *qspi)
{
	u32 val = 0;

	/* revision register: major in bits 15:8, minor in bits 7:0 */
	val = bcm_qspi_read(qspi, BSPI, BSPI_REVISION_ID);
	qspi->bspi_maj_rev = (val >> 8) & 0xff;
	qspi->bspi_min_rev = val & 0xff;
	if (!(bcm_qspi_bspi_ver_three(qspi))) {
		/* Force mapping of BSPI address -> flash offset */
		bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_VALUE, 0);
		bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_ENABLE, 1);
	}
	/*
	 * NOTE(review): presumably marking bspi_enabled first makes the
	 * disable call below take effect unconditionally - confirm against
	 * bcm_qspi_disable_bspi().
	 */
	qspi->bspi_enabled = 1;
	bcm_qspi_disable_bspi(qspi);
	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
}
1361 
1362 static void bcm_qspi_hw_init(struct bcm_qspi *qspi)
1363 {
1364 	struct bcm_qspi_parms parms;
1365 
1366 	bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 0);
1367 	bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_MSB, 0);
1368 	bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
1369 	bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, 0);
1370 	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0x20);
1371 
1372 	parms.mode = SPI_MODE_3;
1373 	parms.bits_per_word = 8;
1374 	parms.speed_hz = qspi->max_speed_hz;
1375 	bcm_qspi_hw_set_parms(qspi, &parms);
1376 
1377 	if (has_bspi(qspi))
1378 		bcm_qspi_bspi_init(qspi);
1379 }
1380 
/* Quiesce the controller: zero SPCR2 (undoing hw_init's 0x20), release the
 * BSPI write lock, and clear any pending MSPI status bit 0.
 */
static void bcm_qspi_hw_uninit(struct bcm_qspi *qspi)
{
	u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);

	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0);
	if (has_bspi(qspi))
		bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);

	/* clear interrupt */
	bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status & ~1);
}
1392 
/* spi-mem hook: accelerated flash reads go through bcm_qspi_exec_mem_op() */
static const struct spi_controller_mem_ops bcm_qspi_mem_ops = {
	.exec_op = bcm_qspi_exec_mem_op,
};
1396 
/* Per-compatible quirk flags resolved through bcm_qspi_of_match */
struct bcm_qspi_data {
	bool	has_mspi_rev;	/* controller exposes a readable MSPI_REV */
	bool	has_spcr3_sysclk;	/* SPCR3 sysclk support - name-derived */
};
1401 
/* older controllers without a readable MSPI_REV register */
static const struct bcm_qspi_data bcm_qspi_no_rev_data = {
	.has_mspi_rev	= false,
	.has_spcr3_sysclk = false,
};

/* controllers with MSPI_REV but no SPCR3 sysclk */
static const struct bcm_qspi_data bcm_qspi_rev_data = {
	.has_mspi_rev	= true,
	.has_spcr3_sysclk = false,
};

/* newest controllers: MSPI_REV plus SPCR3 sysclk */
static const struct bcm_qspi_data bcm_qspi_spcr3_data = {
	.has_mspi_rev	= true,
	.has_spcr3_sysclk = true,
};
1416 
/* DT match table mapping each compatible to its quirk data */
static const struct of_device_id bcm_qspi_of_match[] = {
	{
		.compatible = "brcm,spi-bcm7445-qspi",
		.data = &bcm_qspi_rev_data,

	},
	{
		.compatible = "brcm,spi-bcm-qspi",
		.data = &bcm_qspi_no_rev_data,
	},
	{
		.compatible = "brcm,spi-bcm7216-qspi",
		.data = &bcm_qspi_spcr3_data,
	},
	{
		.compatible = "brcm,spi-bcm7278-qspi",
		.data = &bcm_qspi_spcr3_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, bcm_qspi_of_match);
1438 
/*
 * Common probe shared by the SoC-specific platform drivers.  @soc_intc is
 * non-NULL on SoCs whose QSPI interrupts are routed through a SoC-level
 * interrupt controller (single muxed L1 line).
 */
int bcm_qspi_probe(struct platform_device *pdev,
		   struct bcm_qspi_soc_intc *soc_intc)
{
	const struct of_device_id *of_id = NULL;
	const struct bcm_qspi_data *data;
	struct device *dev = &pdev->dev;
	struct bcm_qspi *qspi;
	struct spi_master *master;
	struct resource *res;
	int irq, ret = 0, num_ints = 0;
	u32 val;
	u32 rev = 0;
	const char *name = NULL;
	int num_irqs = ARRAY_SIZE(qspi_irq_tab);

	/* We only support device-tree instantiation */
	if (!dev->of_node)
		return -ENODEV;

	of_id = of_match_node(bcm_qspi_of_match, dev->of_node);
	if (!of_id)
		return -ENODEV;

	data = of_id->data;

	master = devm_spi_alloc_master(dev, sizeof(struct bcm_qspi));
	if (!master) {
		dev_err(dev, "error allocating spi_master\n");
		return -ENOMEM;
	}

	qspi = spi_master_get_devdata(master);

	/* clock is optional; NULL clk means use the fixed MSPI base freq */
	qspi->clk = devm_clk_get_optional(&pdev->dev, NULL);
	if (IS_ERR(qspi->clk))
		return PTR_ERR(qspi->clk);

	qspi->pdev = pdev;
	qspi->trans_pos.trans = NULL;
	qspi->trans_pos.byte = 0;
	qspi->trans_pos.mspi_last_trans = true;
	qspi->master = master;

	master->bus_num = -1;
	master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD;
	master->setup = bcm_qspi_setup;
	master->transfer_one = bcm_qspi_transfer_one;
	master->mem_ops = &bcm_qspi_mem_ops;
	master->cleanup = bcm_qspi_cleanup;
	master->dev.of_node = dev->of_node;
	master->num_chipselect = NUM_CHIPSELECT;
	master->use_gpio_descriptors = true;

	qspi->big_endian = of_device_is_big_endian(dev->of_node);

	/* "num-cs" DT property overrides the default chip-select count */
	if (!of_property_read_u32(dev->of_node, "num-cs", &val))
		master->num_chipselect = val;

	/* MSPI registers: newer DTs use "hif_mspi", older ones "mspi" */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hif_mspi");
	if (!res)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "mspi");

	if (res) {
		qspi->base[MSPI]  = devm_ioremap_resource(dev, res);
		if (IS_ERR(qspi->base[MSPI]))
			return PTR_ERR(qspi->base[MSPI]);
	} else {
		/*
		 * NOTE(review): with no MSPI resource, probe returns success
		 * without registering a controller - confirm this "silently
		 * do nothing" behavior is intentional.
		 */
		return 0;
	}

	/* BSPI registers are optional; without them only MSPI mode is used */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
	if (res) {
		qspi->base[BSPI]  = devm_ioremap_resource(dev, res);
		if (IS_ERR(qspi->base[BSPI]))
			return PTR_ERR(qspi->base[BSPI]);
		qspi->bspi_mode = true;
	} else {
		qspi->bspi_mode = false;
	}

	dev_info(dev, "using %smspi mode\n", qspi->bspi_mode ? "bspi-" : "");

	/* optional separate chip-select register block */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg");
	if (res) {
		qspi->base[CHIP_SELECT]  = devm_ioremap_resource(dev, res);
		if (IS_ERR(qspi->base[CHIP_SELECT]))
			return PTR_ERR(qspi->base[CHIP_SELECT]);
	}

	qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id),
				GFP_KERNEL);
	if (!qspi->dev_ids)
		return -ENOMEM;

	/*
	 * Some SoCs integrate spi controller (e.g., its interrupt bits)
	 * in specific ways
	 */
	if (soc_intc) {
		qspi->soc_intc = soc_intc;
		soc_intc->bcm_qspi_int_set(soc_intc, MSPI_DONE, true);
	} else {
		qspi->soc_intc = NULL;
	}

	if (qspi->clk) {
		ret = clk_prepare_enable(qspi->clk);
		if (ret) {
			dev_err(dev, "failed to prepare clock\n");
			goto qspi_probe_err;
		}
		qspi->base_clk = clk_get_rate(qspi->clk);
	} else {
		qspi->base_clk = MSPI_BASE_FREQ;
	}

	if (data->has_mspi_rev) {
		rev = bcm_qspi_read(qspi, MSPI, MSPI_REV);
		/* some older revs do not have a MSPI_REV register */
		if ((rev & 0xff) == 0xff)
			rev = 0;
	}

	qspi->mspi_maj_rev = (rev >> 4) & 0xf;
	qspi->mspi_min_rev = rev & 0xf;
	qspi->mspi_spcr3_sysclk = data->has_spcr3_sysclk;

	qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2);

	/*
	 * On SW resets it is possible to have the mask still enabled
	 * Need to disable the mask and clear the status while we init
	 */
	bcm_qspi_hw_uninit(qspi);

	/* request either the per-line L2 irqs or the single muxed L1 irq */
	for (val = 0; val < num_irqs; val++) {
		irq = -1;
		name = qspi_irq_tab[val].irq_name;
		if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
			/* get the l2 interrupts */
			irq = platform_get_irq_byname_optional(pdev, name);
		} else if (!num_ints && soc_intc) {
			/* all mspi, bspi intrs muxed to one L1 intr */
			irq = platform_get_irq(pdev, 0);
		}

		if (irq  >= 0) {
			ret = devm_request_irq(&pdev->dev, irq,
					       qspi_irq_tab[val].irq_handler, 0,
					       name,
					       &qspi->dev_ids[val]);
			if (ret < 0) {
				dev_err(&pdev->dev, "IRQ %s not found\n", name);
				goto qspi_probe_err;
			}

			qspi->dev_ids[val].dev = qspi;
			qspi->dev_ids[val].irqp = &qspi_irq_tab[val];
			num_ints++;
			dev_dbg(&pdev->dev, "registered IRQ %s %d\n",
				qspi_irq_tab[val].irq_name,
				irq);
		}
	}

	if (!num_ints) {
		dev_err(&pdev->dev, "no IRQs registered, cannot init driver\n");
		ret = -EINVAL;
		goto qspi_probe_err;
	}

	bcm_qspi_hw_init(qspi);
	init_completion(&qspi->mspi_done);
	init_completion(&qspi->bspi_done);
	qspi->curr_cs = -1;

	platform_set_drvdata(pdev, qspi);

	/* invalidate cached transfer mode so the first op programs it */
	qspi->xfer_mode.width = -1;
	qspi->xfer_mode.addrlen = -1;
	qspi->xfer_mode.hp = -1;

	ret = spi_register_master(master);
	if (ret < 0) {
		dev_err(dev, "can't register master\n");
		goto qspi_reg_err;
	}

	return 0;

qspi_reg_err:
	bcm_qspi_hw_uninit(qspi);
	clk_disable_unprepare(qspi->clk);
qspi_probe_err:
	kfree(qspi->dev_ids);
	return ret;
}
/* probe function to be called by SoC specific platform driver probe */
EXPORT_SYMBOL_GPL(bcm_qspi_probe);
1639 
1640 int bcm_qspi_remove(struct platform_device *pdev)
1641 {
1642 	struct bcm_qspi *qspi = platform_get_drvdata(pdev);
1643 
1644 	spi_unregister_master(qspi->master);
1645 	bcm_qspi_hw_uninit(qspi);
1646 	clk_disable_unprepare(qspi->clk);
1647 	kfree(qspi->dev_ids);
1648 
1649 	return 0;
1650 }
1651 /* function to be called by SoC specific platform driver remove() */
1652 EXPORT_SYMBOL_GPL(bcm_qspi_remove);
1653 
1654 static int __maybe_unused bcm_qspi_suspend(struct device *dev)
1655 {
1656 	struct bcm_qspi *qspi = dev_get_drvdata(dev);
1657 
1658 	/* store the override strap value */
1659 	if (!bcm_qspi_bspi_ver_three(qspi))
1660 		qspi->s3_strap_override_ctrl =
1661 			bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
1662 
1663 	spi_master_suspend(qspi->master);
1664 	clk_disable_unprepare(qspi->clk);
1665 	bcm_qspi_hw_uninit(qspi);
1666 
1667 	return 0;
1668 };
1669 
/* PM resume: re-init hardware, re-enable interrupts/clock, resume master */
static int __maybe_unused bcm_qspi_resume(struct device *dev)
{
	struct bcm_qspi *qspi = dev_get_drvdata(dev);
	int ret = 0;

	/*
	 * NOTE(review): registers are programmed before clk_prepare_enable()
	 * below (mirroring suspend's ordering) - confirm register access
	 * does not require this clock.
	 */
	bcm_qspi_hw_init(qspi);
	bcm_qspi_chip_select(qspi, qspi->curr_cs);
	if (qspi->soc_intc)
		/* enable MSPI interrupt */
		qspi->soc_intc->bcm_qspi_int_set(qspi->soc_intc, MSPI_DONE,
						 true);

	ret = clk_prepare_enable(qspi->clk);
	if (!ret)
		spi_master_resume(qspi->master);

	return ret;
}
1688 
/* dev_pm_ops shared by the SoC-specific platform drivers */
SIMPLE_DEV_PM_OPS(bcm_qspi_pm_ops, bcm_qspi_suspend, bcm_qspi_resume);

/* pm_ops to be called by SoC specific platform driver */
EXPORT_SYMBOL_GPL(bcm_qspi_pm_ops);

MODULE_AUTHOR("Kamal Dasu");
MODULE_DESCRIPTION("Broadcom QSPI driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
1698