1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * Driver for Broadcom BRCMSTB, NSP,  NS2, Cygnus SPI Controllers
4   *
5   * Copyright 2016 Broadcom
6   */
7  
8  #include <linux/clk.h>
9  #include <linux/delay.h>
10  #include <linux/device.h>
11  #include <linux/init.h>
12  #include <linux/interrupt.h>
13  #include <linux/io.h>
14  #include <linux/ioport.h>
15  #include <linux/kernel.h>
16  #include <linux/module.h>
17  #include <linux/of.h>
18  #include <linux/of_irq.h>
19  #include <linux/platform_device.h>
20  #include <linux/slab.h>
21  #include <linux/spi/spi.h>
22  #include <linux/mtd/spi-nor.h>
23  #include <linux/sysfs.h>
24  #include <linux/types.h>
25  #include "spi-bcm-qspi.h"
26  
27  #define DRIVER_NAME "bcm_qspi"
28  
29  
30  /* BSPI register offsets */
31  #define BSPI_REVISION_ID			0x000
32  #define BSPI_SCRATCH				0x004
33  #define BSPI_MAST_N_BOOT_CTRL			0x008
34  #define BSPI_BUSY_STATUS			0x00c
35  #define BSPI_INTR_STATUS			0x010
36  #define BSPI_B0_STATUS				0x014
37  #define BSPI_B0_CTRL				0x018
38  #define BSPI_B1_STATUS				0x01c
39  #define BSPI_B1_CTRL				0x020
40  #define BSPI_STRAP_OVERRIDE_CTRL		0x024
41  #define BSPI_FLEX_MODE_ENABLE			0x028
42  #define BSPI_BITS_PER_CYCLE			0x02c
43  #define BSPI_BITS_PER_PHASE			0x030
44  #define BSPI_CMD_AND_MODE_BYTE			0x034
45  #define BSPI_BSPI_FLASH_UPPER_ADDR_BYTE	0x038
46  #define BSPI_BSPI_XOR_VALUE			0x03c
47  #define BSPI_BSPI_XOR_ENABLE			0x040
48  #define BSPI_BSPI_PIO_MODE_ENABLE		0x044
49  #define BSPI_BSPI_PIO_IODIR			0x048
50  #define BSPI_BSPI_PIO_DATA			0x04c
51  
52  /* RAF register offsets */
53  #define BSPI_RAF_START_ADDR			0x100
54  #define BSPI_RAF_NUM_WORDS			0x104
55  #define BSPI_RAF_CTRL				0x108
56  #define BSPI_RAF_FULLNESS			0x10c
57  #define BSPI_RAF_WATERMARK			0x110
58  #define BSPI_RAF_STATUS			0x114
59  #define BSPI_RAF_READ_DATA			0x118
60  #define BSPI_RAF_WORD_CNT			0x11c
61  #define BSPI_RAF_CURR_ADDR			0x120
62  
63  /* Override mode masks */
64  #define BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE	BIT(0)
65  #define BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL	BIT(1)
66  #define BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE	BIT(2)
67  #define BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD	BIT(3)
68  #define BSPI_STRAP_OVERRIDE_CTRL_ENDAIN_MODE	BIT(4)
69  
70  #define BSPI_ADDRLEN_3BYTES			3
71  #define BSPI_ADDRLEN_4BYTES			4
72  
73  #define BSPI_RAF_STATUS_FIFO_EMPTY_MASK	BIT(1)
74  
75  #define BSPI_RAF_CTRL_START_MASK		BIT(0)
76  #define BSPI_RAF_CTRL_CLEAR_MASK		BIT(1)
77  
78  #define BSPI_BPP_MODE_SELECT_MASK		BIT(8)
79  #define BSPI_BPP_ADDR_SELECT_MASK		BIT(16)
80  
81  #define BSPI_READ_LENGTH			256
82  
83  /* MSPI register offsets */
84  #define MSPI_SPCR0_LSB				0x000
85  #define MSPI_SPCR0_MSB				0x004
86  #define MSPI_SPCR0_MSB_CPHA			BIT(0)
87  #define MSPI_SPCR0_MSB_CPOL			BIT(1)
88  #define MSPI_SPCR0_MSB_BITS_SHIFT		0x2
89  #define MSPI_SPCR1_LSB				0x008
90  #define MSPI_SPCR1_MSB				0x00c
91  #define MSPI_NEWQP				0x010
92  #define MSPI_ENDQP				0x014
93  #define MSPI_SPCR2				0x018
94  #define MSPI_MSPI_STATUS			0x020
95  #define MSPI_CPTQP				0x024
96  #define MSPI_SPCR3				0x028
97  #define MSPI_REV				0x02c
98  #define MSPI_TXRAM				0x040
99  #define MSPI_RXRAM				0x0c0
100  #define MSPI_CDRAM				0x140
101  #define MSPI_WRITE_LOCK			0x180
102  
103  #define MSPI_MASTER_BIT			BIT(7)
104  
105  #define MSPI_NUM_CDRAM				16
106  #define MSPI_CDRAM_OUTP				BIT(8)
107  #define MSPI_CDRAM_CONT_BIT			BIT(7)
108  #define MSPI_CDRAM_BITSE_BIT			BIT(6)
109  #define MSPI_CDRAM_DT_BIT			BIT(5)
110  #define MSPI_CDRAM_PCS				0xf
111  
112  #define MSPI_SPCR2_SPE				BIT(6)
113  #define MSPI_SPCR2_CONT_AFTER_CMD		BIT(7)
114  
115  #define MSPI_SPCR3_FASTBR			BIT(0)
116  #define MSPI_SPCR3_FASTDT			BIT(1)
117  #define MSPI_SPCR3_SYSCLKSEL_MASK		GENMASK(11, 10)
118  #define MSPI_SPCR3_SYSCLKSEL_27			(MSPI_SPCR3_SYSCLKSEL_MASK & \
119  						 ~(BIT(10) | BIT(11)))
120  #define MSPI_SPCR3_SYSCLKSEL_108		(MSPI_SPCR3_SYSCLKSEL_MASK & \
121  						 BIT(11))
122  #define MSPI_SPCR3_TXRXDAM_MASK			GENMASK(4, 2)
123  #define MSPI_SPCR3_DAM_8BYTE			0
124  #define MSPI_SPCR3_DAM_16BYTE			(BIT(2) | BIT(4))
125  #define MSPI_SPCR3_DAM_32BYTE			(BIT(3) | BIT(5))
126  #define MSPI_SPCR3_HALFDUPLEX			BIT(6)
127  #define MSPI_SPCR3_HDOUTTYPE			BIT(7)
128  #define MSPI_SPCR3_DATA_REG_SZ			BIT(8)
129  #define MSPI_SPCR3_CPHARX			BIT(9)
130  
131  #define MSPI_MSPI_STATUS_SPIF			BIT(0)
132  
133  #define INTR_BASE_BIT_SHIFT			0x02
134  #define INTR_COUNT				0x07
135  
136  #define NUM_CHIPSELECT				4
137  #define QSPI_SPBR_MAX				255U
138  #define MSPI_BASE_FREQ				27000000UL
139  
140  #define OPCODE_DIOR				0xBB
141  #define OPCODE_QIOR				0xEB
142  #define OPCODE_DIOR_4B				0xBC
143  #define OPCODE_QIOR_4B				0xEC
144  
145  #define MAX_CMD_SIZE				6
146  
147  #define ADDR_4MB_MASK				GENMASK(22, 0)
148  
149  /* stop at end of transfer, no other reason */
150  #define TRANS_STATUS_BREAK_NONE		0
151  /* stop at end of spi_message */
152  #define TRANS_STATUS_BREAK_EOM			1
153  /* stop at end of spi_transfer if delay */
154  #define TRANS_STATUS_BREAK_DELAY		2
155  /* stop at end of spi_transfer if cs_change */
156  #define TRANS_STATUS_BREAK_CS_CHANGE		4
157  /* stop if we run out of bytes */
158  #define TRANS_STATUS_BREAK_NO_BYTES		8
159  
160  /* events that make us stop filling TX slots */
161  #define TRANS_STATUS_BREAK_TX (TRANS_STATUS_BREAK_EOM |		\
162  			       TRANS_STATUS_BREAK_DELAY |		\
163  			       TRANS_STATUS_BREAK_CS_CHANGE)
164  
165  /* events that make us deassert CS */
166  #define TRANS_STATUS_BREAK_DESELECT (TRANS_STATUS_BREAK_EOM |		\
167  				     TRANS_STATUS_BREAK_CS_CHANGE)
168  
169  /*
170   * Used to write and read data in the correct byte order when TXRAM
171   * and RXRAM are accessed as 32-bit registers
172   */
173  #define swap4bytes(__val) \
174  	((((__val) >> 24) & 0x000000FF) | (((__val) >>  8) & 0x0000FF00) | \
175  	 (((__val) <<  8) & 0x00FF0000) | (((__val) << 24) & 0xFF000000))
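/*
 * Illustrative example of the macro above (values chosen here, not taken
 * from the original source): swap4bytes(0x11223344) evaluates to
 * 0x44332211, i.e. a full byte reversal of the 32-bit value.
 */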
176  
177  struct bcm_qspi_parms {
178  	u32 speed_hz;
179  	u8 mode;
180  	u8 bits_per_word;
181  };
182  
183  struct bcm_xfer_mode {
184  	bool flex_mode;
185  	unsigned int width;
186  	unsigned int addrlen;
187  	unsigned int hp;
188  };
189  
190  enum base_type {
191  	MSPI,
192  	BSPI,
193  	CHIP_SELECT,
194  	BASEMAX,
195  };
196  
197  enum irq_source {
198  	SINGLE_L2,
199  	MUXED_L1,
200  };
201  
202  struct bcm_qspi_irq {
203  	const char *irq_name;
204  	const irq_handler_t irq_handler;
205  	int irq_source;
206  	u32 mask;
207  };
208  
209  struct bcm_qspi_dev_id {
210  	const struct bcm_qspi_irq *irqp;
211  	void *dev;
212  };
213  
214  
215  struct qspi_trans {
216  	struct spi_transfer *trans;
217  	int byte;
218  	bool mspi_last_trans;
219  };
220  
221  struct bcm_qspi {
222  	struct platform_device *pdev;
223  	struct spi_controller *host;
224  	struct clk *clk;
225  	u32 base_clk;
226  	u32 max_speed_hz;
227  	void __iomem *base[BASEMAX];
228  
229  	/* Some SoCs provide custom interrupt status register(s) */
230  	struct bcm_qspi_soc_intc	*soc_intc;
231  
232  	struct bcm_qspi_parms last_parms;
233  	struct qspi_trans  trans_pos;
234  	int curr_cs;
235  	int bspi_maj_rev;
236  	int bspi_min_rev;
237  	int bspi_enabled;
238  	const struct spi_mem_op *bspi_rf_op;
239  	u32 bspi_rf_op_idx;
240  	u32 bspi_rf_op_len;
241  	u32 bspi_rf_op_status;
242  	struct bcm_xfer_mode xfer_mode;
243  	u32 s3_strap_override_ctrl;
244  	bool bspi_mode;
245  	bool big_endian;
246  	int num_irqs;
247  	struct bcm_qspi_dev_id *dev_ids;
248  	struct completion mspi_done;
249  	struct completion bspi_done;
250  	u8 mspi_maj_rev;
251  	u8 mspi_min_rev;
252  	bool mspi_spcr3_sysclk;
253  };
254  
255  static inline bool has_bspi(struct bcm_qspi *qspi)
256  {
257  	return qspi->bspi_mode;
258  }
259  
260  /* hardware supports SPCR3 and fast baud rate */
261  static inline bool bcm_qspi_has_fastbr(struct bcm_qspi *qspi)
262  {
263  	if (!has_bspi(qspi) &&
264  	    ((qspi->mspi_maj_rev >= 1) &&
265  	     (qspi->mspi_min_rev >= 5)))
266  		return true;
267  
268  	return false;
269  }
270  
271  /* hardware supports a 108 MHz system clock */
272  static inline bool bcm_qspi_has_sysclk_108(struct bcm_qspi *qspi)
273  {
274  	if (!has_bspi(qspi) && (qspi->mspi_spcr3_sysclk ||
275  	    ((qspi->mspi_maj_rev >= 1) &&
276  	     (qspi->mspi_min_rev >= 6))))
277  		return true;
278  
279  	return false;
280  }
281  
282  static inline int bcm_qspi_spbr_min(struct bcm_qspi *qspi)
283  {
284  	if (bcm_qspi_has_fastbr(qspi))
285  		return (bcm_qspi_has_sysclk_108(qspi) ? 4 : 1);
286  	else
287  		return 8;
288  }
289  
290  static u32 bcm_qspi_calc_spbr(u32 clk_speed_hz,
291  			      const struct bcm_qspi_parms *xp)
292  {
293  	u32 spbr = 0;
294  
295  	/* SPBR = System Clock/(2 * SCK Baud Rate) */
296  	if (xp->speed_hz)
297  		spbr = clk_speed_hz / (xp->speed_hz * 2);
298  
299  	return spbr;
300  }
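/*
 * Worked example for the divider above (illustrative values, not from the
 * original source): with the 27 MHz MSPI base clock and a requested SCK of
 * 1 MHz, spbr = 27000000 / (2 * 1000000) = 13.  The caller later clamps the
 * result to [bcm_qspi_spbr_min(), QSPI_SPBR_MAX] before writing it to
 * MSPI_SPCR0_LSB.
 */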
301  
302  /* Read qspi controller register */
303  static inline u32 bcm_qspi_read(struct bcm_qspi *qspi, enum base_type type,
304  				unsigned int offset)
305  {
306  	return bcm_qspi_readl(qspi->big_endian, qspi->base[type] + offset);
307  }
308  
309  /* Write qspi controller register */
310  static inline void bcm_qspi_write(struct bcm_qspi *qspi, enum base_type type,
311  				  unsigned int offset, unsigned int data)
312  {
313  	bcm_qspi_writel(qspi->big_endian, data, qspi->base[type] + offset);
314  }
315  
316  /* BSPI helpers */
317  static int bcm_qspi_bspi_busy_poll(struct bcm_qspi *qspi)
318  {
319  	int i;
320  
321  	/* this should normally finish within 10us */
322  	for (i = 0; i < 1000; i++) {
323  		if (!(bcm_qspi_read(qspi, BSPI, BSPI_BUSY_STATUS) & 1))
324  			return 0;
325  		udelay(1);
326  	}
327  	dev_warn(&qspi->pdev->dev, "timeout waiting for !busy_status\n");
328  	return -EIO;
329  }
330  
331  static inline bool bcm_qspi_bspi_ver_three(struct bcm_qspi *qspi)
332  {
333  	if (qspi->bspi_maj_rev < 4)
334  		return true;
335  	return false;
336  }
337  
338  static void bcm_qspi_bspi_flush_prefetch_buffers(struct bcm_qspi *qspi)
339  {
340  	bcm_qspi_bspi_busy_poll(qspi);
341  	/* Force rising edge for the b0/b1 'flush' field */
342  	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 1);
343  	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 1);
344  	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
345  	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
346  }
347  
348  static int bcm_qspi_bspi_lr_is_fifo_empty(struct bcm_qspi *qspi)
349  {
350  	return (bcm_qspi_read(qspi, BSPI, BSPI_RAF_STATUS) &
351  				BSPI_RAF_STATUS_FIFO_EMPTY_MASK);
352  }
353  
354  static inline u32 bcm_qspi_bspi_lr_read_fifo(struct bcm_qspi *qspi)
355  {
356  	u32 data = bcm_qspi_read(qspi, BSPI, BSPI_RAF_READ_DATA);
357  
358  	/* BSPI v3 LR is LE only, convert data to host endianness */
359  	if (bcm_qspi_bspi_ver_three(qspi))
360  		data = le32_to_cpu(data);
361  
362  	return data;
363  }
364  
365  static inline void bcm_qspi_bspi_lr_start(struct bcm_qspi *qspi)
366  {
367  	bcm_qspi_bspi_busy_poll(qspi);
368  	bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
369  		       BSPI_RAF_CTRL_START_MASK);
370  }
371  
372  static inline void bcm_qspi_bspi_lr_clear(struct bcm_qspi *qspi)
373  {
374  	bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
375  		       BSPI_RAF_CTRL_CLEAR_MASK);
376  	bcm_qspi_bspi_flush_prefetch_buffers(qspi);
377  }
378  
379  static void bcm_qspi_bspi_lr_data_read(struct bcm_qspi *qspi)
380  {
381  	u32 *buf = (u32 *)qspi->bspi_rf_op->data.buf.in;
382  	u32 data = 0;
383  
384  	dev_dbg(&qspi->pdev->dev, "xfer %p rx %p rxlen %d\n", qspi->bspi_rf_op,
385  		qspi->bspi_rf_op->data.buf.in, qspi->bspi_rf_op_len);
386  	while (!bcm_qspi_bspi_lr_is_fifo_empty(qspi)) {
387  		data = bcm_qspi_bspi_lr_read_fifo(qspi);
388  		if (likely(qspi->bspi_rf_op_len >= 4) &&
389  		    IS_ALIGNED((uintptr_t)buf, 4)) {
390  			buf[qspi->bspi_rf_op_idx++] = data;
391  			qspi->bspi_rf_op_len -= 4;
392  		} else {
393  			/* Read out the remaining bytes one at a time */
394  			u8 *cbuf = (u8 *)&buf[qspi->bspi_rf_op_idx];
395  
396  			data = cpu_to_le32(data);
397  			while (qspi->bspi_rf_op_len) {
398  				*cbuf++ = (u8)data;
399  				data >>= 8;
400  				qspi->bspi_rf_op_len--;
401  			}
402  		}
403  	}
404  }
405  
406  static void bcm_qspi_bspi_set_xfer_params(struct bcm_qspi *qspi, u8 cmd_byte,
407  					  int bpp, int bpc, int flex_mode)
408  {
409  	bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
410  	bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_CYCLE, bpc);
411  	bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_PHASE, bpp);
412  	bcm_qspi_write(qspi, BSPI, BSPI_CMD_AND_MODE_BYTE, cmd_byte);
413  	bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, flex_mode);
414  }
415  
416  static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi,
417  				       const struct spi_mem_op *op, int hp)
418  {
419  	int bpc = 0, bpp = 0;
420  	u8 command = op->cmd.opcode;
421  	int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
422  	int addrlen = op->addr.nbytes;
423  	int flex_mode = 1;
424  
425  	dev_dbg(&qspi->pdev->dev, "set flex mode w %x addrlen %x hp %d\n",
426  		width, addrlen, hp);
427  
428  	if (addrlen == BSPI_ADDRLEN_4BYTES)
429  		bpp = BSPI_BPP_ADDR_SELECT_MASK;
430  
431  	if (op->dummy.nbytes)
432  		bpp |= (op->dummy.nbytes * 8) / op->dummy.buswidth;
433  
434  	switch (width) {
435  	case SPI_NBITS_SINGLE:
436  		if (addrlen == BSPI_ADDRLEN_3BYTES)
437  			/* default mode, does not need flex_cmd */
438  			flex_mode = 0;
439  		break;
440  	case SPI_NBITS_DUAL:
441  		bpc = 0x00000001;
442  		if (hp) {
443  			bpc |= 0x00010100; /* address and mode are 2-bit */
444  			bpp = BSPI_BPP_MODE_SELECT_MASK;
445  		}
446  		break;
447  	case SPI_NBITS_QUAD:
448  		bpc = 0x00000002;
449  		if (hp) {
450  			bpc |= 0x00020200; /* address and mode are 4-bit */
451  			bpp |= BSPI_BPP_MODE_SELECT_MASK;
452  		}
453  		break;
454  	default:
455  		return -EINVAL;
456  	}
457  
458  	bcm_qspi_bspi_set_xfer_params(qspi, command, bpp, bpc, flex_mode);
459  
460  	return 0;
461  }
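/*
 * Example of the dummy calculation above (illustrative, assuming a quad I/O
 * read with one dummy byte at buswidth 4): bpp |= (1 * 8) / 4 = 2, which is
 * programmed into BSPI_BITS_PER_PHASE alongside the address/mode select bits.
 */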
462  
463  static int bcm_qspi_bspi_set_override(struct bcm_qspi *qspi,
464  				      const struct spi_mem_op *op, int hp)
465  {
466  	int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
467  	int addrlen = op->addr.nbytes;
468  	u32 data = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
469  
470  	dev_dbg(&qspi->pdev->dev, "set override mode w %x addrlen %x hp %d\n",
471  		width, addrlen, hp);
472  
473  	switch (width) {
474  	case SPI_NBITS_SINGLE:
475  		/* clear quad/dual mode */
476  		data &= ~(BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD |
477  			  BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL);
478  		break;
479  	case SPI_NBITS_QUAD:
480  		/* clear dual mode and set quad mode */
481  		data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
482  		data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
483  		break;
484  	case SPI_NBITS_DUAL:
485  		/* clear quad mode set dual mode */
486  		data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
487  		data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
488  		break;
489  	default:
490  		return -EINVAL;
491  	}
492  
493  	if (addrlen == BSPI_ADDRLEN_4BYTES)
494  		/* set 4-byte mode */
495  		data |= BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
496  	else
497  		/* clear 4 byte mode */
498  		data &= ~BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
499  
500  	/* set the override mode */
501  	data |=	BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
502  	bcm_qspi_write(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL, data);
503  	bcm_qspi_bspi_set_xfer_params(qspi, op->cmd.opcode, 0, 0, 0);
504  
505  	return 0;
506  }
507  
508  static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
509  				  const struct spi_mem_op *op, int hp)
510  {
511  	int error = 0;
512  	int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
513  	int addrlen = op->addr.nbytes;
514  
515  	/* default mode */
516  	qspi->xfer_mode.flex_mode = true;
517  
518  	if (!bcm_qspi_bspi_ver_three(qspi)) {
519  		u32 val, mask;
520  
521  		val = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
522  		mask = BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
523  		if (val & mask || qspi->s3_strap_override_ctrl & mask) {
524  			qspi->xfer_mode.flex_mode = false;
525  			bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
526  			error = bcm_qspi_bspi_set_override(qspi, op, hp);
527  		}
528  	}
529  
530  	if (qspi->xfer_mode.flex_mode)
531  		error = bcm_qspi_bspi_set_flex_mode(qspi, op, hp);
532  
533  	if (error) {
534  		dev_warn(&qspi->pdev->dev,
535  			 "INVALID COMBINATION: width=%d addrlen=%d hp=%d\n",
536  			 width, addrlen, hp);
537  	} else if (qspi->xfer_mode.width != width ||
538  		   qspi->xfer_mode.addrlen != addrlen ||
539  		   qspi->xfer_mode.hp != hp) {
540  		qspi->xfer_mode.width = width;
541  		qspi->xfer_mode.addrlen = addrlen;
542  		qspi->xfer_mode.hp = hp;
543  		dev_dbg(&qspi->pdev->dev,
544  			"cs:%d %d-lane output, %d-byte address%s\n",
545  			qspi->curr_cs,
546  			qspi->xfer_mode.width,
547  			qspi->xfer_mode.addrlen,
548  			qspi->xfer_mode.hp != -1 ? ", hp mode" : "");
549  	}
550  
551  	return error;
552  }
553  
554  static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
555  {
556  	if (!has_bspi(qspi))
557  		return;
558  
559  	qspi->bspi_enabled = 1;
560  	if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1) == 0)
561  		return;
562  
563  	bcm_qspi_bspi_flush_prefetch_buffers(qspi);
564  	udelay(1);
565  	bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 0);
566  	udelay(1);
567  }
568  
569  static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
570  {
571  	if (!has_bspi(qspi))
572  		return;
573  
574  	qspi->bspi_enabled = 0;
575  	if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1))
576  		return;
577  
578  	bcm_qspi_bspi_busy_poll(qspi);
579  	bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 1);
580  	udelay(1);
581  }
582  
583  static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
584  {
585  	u32 rd = 0;
586  	u32 wr = 0;
587  
588  	if (cs >= 0 && qspi->base[CHIP_SELECT]) {
589  		rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
590  		wr = (rd & ~0xff) | (1 << cs);
591  		if (rd == wr)
592  			return;
593  		bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
594  		usleep_range(10, 20);
595  	}
596  
597  	dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
598  	qspi->curr_cs = cs;
599  }
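/*
 * Note on the helper above: when a dedicated cs_reg region exists, the
 * selected chip is encoded as a one-hot bit (1 << cs) in the low byte of
 * that register, and the write is skipped if the selection is unchanged.
 */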
600  
601  static bool bcmspi_parms_did_change(const struct bcm_qspi_parms * const cur,
602  				    const struct bcm_qspi_parms * const prev)
603  {
604  	return (cur->speed_hz != prev->speed_hz) ||
605  		(cur->mode != prev->mode) ||
606  		(cur->bits_per_word != prev->bits_per_word);
607  }
608  
609  
610  /* MSPI helpers */
611  static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
612  				  const struct bcm_qspi_parms *xp)
613  {
614  	u32 spcr, spbr = 0;
615  
616  	if (!bcmspi_parms_did_change(xp, &qspi->last_parms))
617  		return;
618  
619  	if (!qspi->mspi_maj_rev)
620  		/* legacy controller */
621  		spcr = MSPI_MASTER_BIT;
622  	else
623  		spcr = 0;
624  
625  	/*
626  	 * Bits per transfer.  BITS determines the number of data bits
627  	 * transferred if the command control bit (BITSE of a
628  	 * CDRAM Register) is equal to 1.
629  	 * If CDRAM BITSE is equal to 0, 8 data bits are transferred
630  	 * regardless
631  	 */
632  	if (xp->bits_per_word != 16 && xp->bits_per_word != 64)
633  		spcr |= xp->bits_per_word << MSPI_SPCR0_MSB_BITS_SHIFT;
634  
635  	spcr |= xp->mode & (MSPI_SPCR0_MSB_CPHA | MSPI_SPCR0_MSB_CPOL);
636  	bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_MSB, spcr);
637  
638  	if (bcm_qspi_has_fastbr(qspi)) {
639  		spcr = 0;
640  
641  		/* enable fastbr */
642  		spcr |=	MSPI_SPCR3_FASTBR;
643  
644  		if (xp->mode & SPI_3WIRE)
645  			spcr |= MSPI_SPCR3_HALFDUPLEX |  MSPI_SPCR3_HDOUTTYPE;
646  
647  		if (bcm_qspi_has_sysclk_108(qspi)) {
648  			/* check requested baud rate before moving to 108 MHz */
649  			spbr = bcm_qspi_calc_spbr(MSPI_BASE_FREQ * 4, xp);
650  			if (spbr > QSPI_SPBR_MAX) {
651  				/* use the 27 MHz SYSCLK for slower baud rates */
652  				spcr &= ~MSPI_SPCR3_SYSCLKSEL_MASK;
653  				qspi->base_clk = MSPI_BASE_FREQ;
654  			} else {
655  				/* 108 MHz SYSCLK */
656  				spcr |= MSPI_SPCR3_SYSCLKSEL_108;
657  				qspi->base_clk = MSPI_BASE_FREQ * 4;
658  			}
659  		}
660  
661  		if (xp->bits_per_word > 16) {
662  			/* data_reg_size 1 (64bit) */
663  			spcr |=	MSPI_SPCR3_DATA_REG_SZ;
664  			/* TxRx RAM data access mode 2 for 32B and set fastdt */
665  			spcr |=	MSPI_SPCR3_DAM_32BYTE  | MSPI_SPCR3_FASTDT;
666  			/*
667  			 *  Set length of delay after transfer
668  			 *  DTL from 0(256) to 1
669  			 */
670  			bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 1);
671  		} else {
672  			/* data_reg_size[8] = 0 */
673  			spcr &=	~(MSPI_SPCR3_DATA_REG_SZ);
674  
675  			/*
676  			 * TxRx RAM access mode 8B
677  			 * and disable fastdt
678  			 */
679  			spcr &= ~(MSPI_SPCR3_DAM_32BYTE);
680  		}
681  		bcm_qspi_write(qspi, MSPI, MSPI_SPCR3, spcr);
682  	}
683  
684  	/* SCK Baud Rate = System Clock/(2 * SPBR) */
685  	qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2);
686  	spbr = bcm_qspi_calc_spbr(qspi->base_clk, xp);
687  	spbr = clamp_val(spbr, bcm_qspi_spbr_min(qspi), QSPI_SPBR_MAX);
688  	bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spbr);
689  
690  	qspi->last_parms = *xp;
691  }
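/*
 * Resulting SCK ceilings implied by the code above (illustrative): with the
 * legacy minimum divider of 8 and a 27 MHz base clock,
 * max_speed_hz = 27000000 / (8 * 2) = 1.6875 MHz; with FASTBR and the
 * 108 MHz SYSCLK (minimum divider 4), it becomes
 * 108000000 / (4 * 2) = 13.5 MHz.
 */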
692  
693  static void bcm_qspi_update_parms(struct bcm_qspi *qspi,
694  				  struct spi_device *spi,
695  				  struct spi_transfer *trans)
696  {
697  	struct bcm_qspi_parms xp;
698  
699  	xp.speed_hz = trans->speed_hz;
700  	xp.bits_per_word = trans->bits_per_word;
701  	xp.mode = spi->mode;
702  
703  	bcm_qspi_hw_set_parms(qspi, &xp);
704  }
705  
706  static int bcm_qspi_setup(struct spi_device *spi)
707  {
708  	struct bcm_qspi_parms *xp;
709  
710  	if (spi->bits_per_word > 64)
711  		return -EINVAL;
712  
713  	xp = spi_get_ctldata(spi);
714  	if (!xp) {
715  		xp = kzalloc(sizeof(*xp), GFP_KERNEL);
716  		if (!xp)
717  			return -ENOMEM;
718  		spi_set_ctldata(spi, xp);
719  	}
720  	xp->speed_hz = spi->max_speed_hz;
721  	xp->mode = spi->mode;
722  
723  	if (spi->bits_per_word)
724  		xp->bits_per_word = spi->bits_per_word;
725  	else
726  		xp->bits_per_word = 8;
727  
728  	return 0;
729  }
730  
731  static bool bcm_qspi_mspi_transfer_is_last(struct bcm_qspi *qspi,
732  					   struct qspi_trans *qt)
733  {
734  	if (qt->mspi_last_trans &&
735  	    spi_transfer_is_last(qspi->host, qt->trans))
736  		return true;
737  	else
738  		return false;
739  }
740  
741  static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
742  					struct qspi_trans *qt, int flags)
743  {
744  	int ret = TRANS_STATUS_BREAK_NONE;
745  
746  	/* count the last transferred bytes */
747  	if (qt->trans->bits_per_word <= 8)
748  		qt->byte++;
749  	else if (qt->trans->bits_per_word <= 16)
750  		qt->byte += 2;
751  	else if (qt->trans->bits_per_word <= 32)
752  		qt->byte += 4;
753  	else if (qt->trans->bits_per_word <= 64)
754  		qt->byte += 8;
755  
756  	if (qt->byte >= qt->trans->len) {
757  		/* we're at the end of the spi_transfer */
758  		/* in TX mode, need to pause for a delay or CS change */
759  		if (qt->trans->delay.value &&
760  		    (flags & TRANS_STATUS_BREAK_DELAY))
761  			ret |= TRANS_STATUS_BREAK_DELAY;
762  		if (qt->trans->cs_change &&
763  		    (flags & TRANS_STATUS_BREAK_CS_CHANGE))
764  			ret |= TRANS_STATUS_BREAK_CS_CHANGE;
765  
766  		if (bcm_qspi_mspi_transfer_is_last(qspi, qt))
767  			ret |= TRANS_STATUS_BREAK_EOM;
768  		else
769  			ret |= TRANS_STATUS_BREAK_NO_BYTES;
770  
771  		qt->trans = NULL;
772  	}
773  
774  	dev_dbg(&qspi->pdev->dev, "trans %p len %d byte %d ret %x\n",
775  		qt->trans, qt->trans ? qt->trans->len : 0, qt->byte, ret);
776  	return ret;
777  }
778  
779  static inline u8 read_rxram_slot_u8(struct bcm_qspi *qspi, int slot)
780  {
781  	u32 slot_offset = MSPI_RXRAM + (slot << 3) + 0x4;
782  
783  	/* mask out reserved bits */
784  	return bcm_qspi_read(qspi, MSPI, slot_offset) & 0xff;
785  }
786  
787  static inline u16 read_rxram_slot_u16(struct bcm_qspi *qspi, int slot)
788  {
789  	u32 reg_offset = MSPI_RXRAM;
790  	u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
791  	u32 msb_offset = reg_offset + (slot << 3);
792  
793  	return (bcm_qspi_read(qspi, MSPI, lsb_offset) & 0xff) |
794  		((bcm_qspi_read(qspi, MSPI, msb_offset) & 0xff) << 8);
795  }
796  
797  static inline u32 read_rxram_slot_u32(struct bcm_qspi *qspi, int slot)
798  {
799  	u32 reg_offset = MSPI_RXRAM;
800  	u32 offset = reg_offset + (slot << 3);
801  	u32 val;
802  
803  	val = bcm_qspi_read(qspi, MSPI, offset);
804  	val = swap4bytes(val);
805  
806  	return val;
807  }
808  
809  static inline u64 read_rxram_slot_u64(struct bcm_qspi *qspi, int slot)
810  {
811  	u32 reg_offset = MSPI_RXRAM;
812  	u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
813  	u32 msb_offset = reg_offset + (slot << 3);
814  	u32 msb, lsb;
815  
816  	msb = bcm_qspi_read(qspi, MSPI, msb_offset);
817  	msb = swap4bytes(msb);
818  	lsb = bcm_qspi_read(qspi, MSPI, lsb_offset);
819  	lsb = swap4bytes(lsb);
820  
821  	return ((u64)msb << 32 | lsb);
822  }
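/*
 * Layout note for the RXRAM helpers above: each slot occupies 8 bytes (hence
 * the slot << 3 addressing), with one 32-bit word at offset 0 and a second at
 * offset 4; narrow reads mask the value down to the valid byte(s).
 */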
823  
824  static void read_from_hw(struct bcm_qspi *qspi, int slots)
825  {
826  	struct qspi_trans tp;
827  	int slot;
828  
829  	bcm_qspi_disable_bspi(qspi);
830  
831  	if (slots > MSPI_NUM_CDRAM) {
832  		/* should never happen */
833  		dev_err(&qspi->pdev->dev, "%s: too many slots!\n", __func__);
834  		return;
835  	}
836  
837  	tp = qspi->trans_pos;
838  
839  	for (slot = 0; slot < slots; slot++) {
840  		if (tp.trans->bits_per_word <= 8) {
841  			u8 *buf = tp.trans->rx_buf;
842  
843  			if (buf)
844  				buf[tp.byte] = read_rxram_slot_u8(qspi, slot);
845  			dev_dbg(&qspi->pdev->dev, "RD %02x\n",
846  				buf ? buf[tp.byte] : 0x0);
847  		} else if (tp.trans->bits_per_word <= 16) {
848  			u16 *buf = tp.trans->rx_buf;
849  
850  			if (buf)
851  				buf[tp.byte / 2] = read_rxram_slot_u16(qspi,
852  								      slot);
853  			dev_dbg(&qspi->pdev->dev, "RD %04x\n",
854  				buf ? buf[tp.byte / 2] : 0x0);
855  		} else if (tp.trans->bits_per_word <= 32) {
856  			u32 *buf = tp.trans->rx_buf;
857  
858  			if (buf)
859  				buf[tp.byte / 4] = read_rxram_slot_u32(qspi,
860  								      slot);
861  			dev_dbg(&qspi->pdev->dev, "RD %08x\n",
862  				buf ? buf[tp.byte / 4] : 0x0);
863  
864  		} else if (tp.trans->bits_per_word <= 64) {
865  			u64 *buf = tp.trans->rx_buf;
866  
867  			if (buf)
868  				buf[tp.byte / 8] = read_rxram_slot_u64(qspi,
869  								      slot);
870  			dev_dbg(&qspi->pdev->dev, "RD %llx\n",
871  				buf ? buf[tp.byte / 8] : 0x0);
872  
873  
874  		}
875  
876  		update_qspi_trans_byte_count(qspi, &tp,
877  					     TRANS_STATUS_BREAK_NONE);
878  	}
879  
880  	qspi->trans_pos = tp;
881  }
882  
883  static inline void write_txram_slot_u8(struct bcm_qspi *qspi, int slot,
884  				       u8 val)
885  {
886  	u32 reg_offset = MSPI_TXRAM + (slot << 3);
887  
888  	/* mask out reserved bits */
889  	bcm_qspi_write(qspi, MSPI, reg_offset, val);
890  }
891  
892  static inline void write_txram_slot_u16(struct bcm_qspi *qspi, int slot,
893  					u16 val)
894  {
895  	u32 reg_offset = MSPI_TXRAM;
896  	u32 msb_offset = reg_offset + (slot << 3);
897  	u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
898  
899  	bcm_qspi_write(qspi, MSPI, msb_offset, (val >> 8));
900  	bcm_qspi_write(qspi, MSPI, lsb_offset, (val & 0xff));
901  }
902  
903  static inline void write_txram_slot_u32(struct bcm_qspi *qspi, int slot,
904  					u32 val)
905  {
906  	u32 reg_offset = MSPI_TXRAM;
907  	u32 msb_offset = reg_offset + (slot << 3);
908  
909  	bcm_qspi_write(qspi, MSPI, msb_offset, swap4bytes(val));
910  }
911  
912  static inline void write_txram_slot_u64(struct bcm_qspi *qspi, int slot,
913  					u64 val)
914  {
915  	u32 reg_offset = MSPI_TXRAM;
916  	u32 msb_offset = reg_offset + (slot << 3);
917  	u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
918  	u32 msb = upper_32_bits(val);
919  	u32 lsb = lower_32_bits(val);
920  
921  	bcm_qspi_write(qspi, MSPI, msb_offset, swap4bytes(msb));
922  	bcm_qspi_write(qspi, MSPI, lsb_offset, swap4bytes(lsb));
923  }
924  
925  static inline u32 read_cdram_slot(struct bcm_qspi *qspi, int slot)
926  {
927  	return bcm_qspi_read(qspi, MSPI, MSPI_CDRAM + (slot << 2));
928  }
929  
930  static inline void write_cdram_slot(struct bcm_qspi *qspi, int slot, u32 val)
931  {
932  	bcm_qspi_write(qspi, MSPI, (MSPI_CDRAM + (slot << 2)), val);
933  }
934  
935  /* Return number of slots written */
936  static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
937  {
938  	struct qspi_trans tp;
939  	int slot = 0, tstatus = 0;
940  	u32 mspi_cdram = 0;
941  
942  	bcm_qspi_disable_bspi(qspi);
943  	tp = qspi->trans_pos;
944  	bcm_qspi_update_parms(qspi, spi, tp.trans);
945  
946  	/* Run until the end of the transfer or all CDRAM slots are filled */
947  	while (!tstatus && slot < MSPI_NUM_CDRAM) {
948  		mspi_cdram = MSPI_CDRAM_CONT_BIT;
949  		if (tp.trans->bits_per_word <= 8) {
950  			const u8 *buf = tp.trans->tx_buf;
951  			u8 val = buf ? buf[tp.byte] : 0x00;
952  
953  			write_txram_slot_u8(qspi, slot, val);
954  			dev_dbg(&qspi->pdev->dev, "WR %02x\n", val);
955  		} else if (tp.trans->bits_per_word <= 16) {
956  			const u16 *buf = tp.trans->tx_buf;
957  			u16 val = buf ? buf[tp.byte / 2] : 0x0000;
958  
959  			write_txram_slot_u16(qspi, slot, val);
960  			dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
961  		} else if (tp.trans->bits_per_word <= 32) {
962  			const u32 *buf = tp.trans->tx_buf;
963  			u32 val = buf ? buf[tp.byte/4] : 0x0;
964  
965  			write_txram_slot_u32(qspi, slot, val);
966  			dev_dbg(&qspi->pdev->dev, "WR %08x\n", val);
967  		} else if (tp.trans->bits_per_word <= 64) {
968  			const u64 *buf = tp.trans->tx_buf;
969  			u64 val = (buf ? buf[tp.byte/8] : 0x0);
970  
971  			/* use the length of delay from SPCR1_LSB */
972  			if (bcm_qspi_has_fastbr(qspi))
973  				mspi_cdram |= MSPI_CDRAM_DT_BIT;
974  
975  			write_txram_slot_u64(qspi, slot, val);
976  			dev_dbg(&qspi->pdev->dev, "WR %llx\n", val);
977  		}
978  
979  		mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
980  			       MSPI_CDRAM_BITSE_BIT);
981  
982  		/* set 3-wire half-duplex mode, data from host to target */
983  		if ((spi->mode & SPI_3WIRE) && tp.trans->tx_buf)
984  			mspi_cdram |= MSPI_CDRAM_OUTP;
985  
986  		if (has_bspi(qspi))
987  			mspi_cdram &= ~1;
988  		else
989  			mspi_cdram |= (~(1 << spi_get_chipselect(spi, 0)) &
990  				       MSPI_CDRAM_PCS);
991  
992  		write_cdram_slot(qspi, slot, mspi_cdram);
993  
994  		tstatus = update_qspi_trans_byte_count(qspi, &tp,
995  						       TRANS_STATUS_BREAK_TX);
996  		slot++;
997  	}
998  
999  	if (!slot) {
1000  		dev_err(&qspi->pdev->dev, "%s: no data to send?", __func__);
1001  		goto done;
1002  	}
1003  
1004  	dev_dbg(&qspi->pdev->dev, "submitting %d slots\n", slot);
1005  	bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
1006  	bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, slot - 1);
1007  
1008  	/*
1009  	 *  case 1) EOM =1, cs_change =0: SSb inactive
1010  	 *  case 2) EOM =1, cs_change =1: SSb stay active
1011  	 *  case 3) EOM =0, cs_change =0: SSb stay active
1012  	 *  case 4) EOM =0, cs_change =1: SSb inactive
1013  	 */
1014  	if (((tstatus & TRANS_STATUS_BREAK_DESELECT)
1015  	     == TRANS_STATUS_BREAK_CS_CHANGE) ||
1016  	    ((tstatus & TRANS_STATUS_BREAK_DESELECT)
1017  	     == TRANS_STATUS_BREAK_EOM)) {
1018  		mspi_cdram = read_cdram_slot(qspi, slot - 1) &
1019  			~MSPI_CDRAM_CONT_BIT;
1020  		write_cdram_slot(qspi, slot - 1, mspi_cdram);
1021  	}
1022  
1023  	if (has_bspi(qspi))
1024  		bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 1);
1025  
1026  	/* Must flush previous writes before starting MSPI operation */
1027  	mb();
1028  	/* Set cont | spe | spifie */
1029  	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0xe0);
1030  
1031  done:
1032  	return slot;
1033  }
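/*
 * Usage note: write_to_hw() queues at most MSPI_NUM_CDRAM (16) slots per
 * call, so bcm_qspi_transfer_one() below keeps calling it (followed by
 * read_from_hw()) until the whole spi_transfer has been consumed.
 */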
1034  
1035  static int bcm_qspi_bspi_exec_mem_op(struct spi_device *spi,
1036  				     const struct spi_mem_op *op)
1037  {
1038  	struct bcm_qspi *qspi = spi_controller_get_devdata(spi->controller);
1039  	u32 addr = 0, len, rdlen, len_words, from = 0;
1040  	int ret = 0;
1041  	unsigned long timeo = msecs_to_jiffies(100);
1042  	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1043  
1044  	if (bcm_qspi_bspi_ver_three(qspi))
1045  		if (op->addr.nbytes == BSPI_ADDRLEN_4BYTES)
1046  			return -EIO;
1047  
1048  	from = op->addr.val;
1049  	if (!spi_get_csgpiod(spi, 0))
1050  		bcm_qspi_chip_select(qspi, spi_get_chipselect(spi, 0));
1051  	bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
1052  
1053  	/*
1054  	 * when using flex mode we need to send
1055  	 * the upper address byte to bspi
1056  	 */
1057  	if (!bcm_qspi_bspi_ver_three(qspi)) {
1058  		addr = from & 0xff000000;
1059  		bcm_qspi_write(qspi, BSPI,
1060  			       BSPI_BSPI_FLASH_UPPER_ADDR_BYTE, addr);
1061  	}
1062  
1063  	if (!qspi->xfer_mode.flex_mode)
1064  		addr = from;
1065  	else
1066  		addr = from & 0x00ffffff;
1067  
1068  	if (bcm_qspi_bspi_ver_three(qspi) == true)
1069  		addr = (addr + 0xc00000) & 0xffffff;
1070  
1071  	/*
1072  	 * read into the entire buffer by breaking the reads
1073  	 * into RAF buffer read lengths
1074  	 */
1075  	len = op->data.nbytes;
1076  	qspi->bspi_rf_op_idx = 0;
1077  
1078  	do {
1079  		if (len > BSPI_READ_LENGTH)
1080  			rdlen = BSPI_READ_LENGTH;
1081  		else
1082  			rdlen = len;
1083  
1084  		reinit_completion(&qspi->bspi_done);
1085  		bcm_qspi_enable_bspi(qspi);
1086  		len_words = (rdlen + 3) >> 2;
1087  		qspi->bspi_rf_op = op;
1088  		qspi->bspi_rf_op_status = 0;
1089  		qspi->bspi_rf_op_len = rdlen;
1090  		dev_dbg(&qspi->pdev->dev,
1091  			"bspi xfr addr 0x%x len 0x%x", addr, rdlen);
1092  		bcm_qspi_write(qspi, BSPI, BSPI_RAF_START_ADDR, addr);
1093  		bcm_qspi_write(qspi, BSPI, BSPI_RAF_NUM_WORDS, len_words);
1094  		bcm_qspi_write(qspi, BSPI, BSPI_RAF_WATERMARK, 0);
1095  		if (qspi->soc_intc) {
1096  			/*
1097  			 * clear soc MSPI and BSPI interrupts and enable
1098  			 * BSPI interrupts.
1099  			 */
1100  			soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
1101  			soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE, true);
1102  		}
1103  
1104  		/* Must flush previous writes before starting BSPI operation */
1105  		mb();
1106  		bcm_qspi_bspi_lr_start(qspi);
1107  		if (!wait_for_completion_timeout(&qspi->bspi_done, timeo)) {
1108  			dev_err(&qspi->pdev->dev, "timeout waiting for BSPI\n");
1109  			ret = -ETIMEDOUT;
1110  			break;
1111  		}
1112  
1113  		/* set msg return length */
1114  		addr += rdlen;
1115  		len -= rdlen;
1116  	} while (len);
1117  
1118  	return ret;
1119  }
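/*
 * Note on the BSPI read path above: the request is split into RAF sessions
 * of at most BSPI_READ_LENGTH (256) bytes each, and the driver waits for the
 * bspi_done completion (with a 100 ms timeout) between chunks.
 */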
1120  
1121  static int bcm_qspi_transfer_one(struct spi_controller *host,
1122  				 struct spi_device *spi,
1123  				 struct spi_transfer *trans)
1124  {
1125  	struct bcm_qspi *qspi = spi_controller_get_devdata(host);
1126  	int slots;
1127  	unsigned long timeo = msecs_to_jiffies(100);
1128  
1129  	if (!spi_get_csgpiod(spi, 0))
1130  		bcm_qspi_chip_select(qspi, spi_get_chipselect(spi, 0));
1131  	qspi->trans_pos.trans = trans;
1132  	qspi->trans_pos.byte = 0;
1133  
1134  	while (qspi->trans_pos.byte < trans->len) {
1135  		reinit_completion(&qspi->mspi_done);
1136  
1137  		slots = write_to_hw(qspi, spi);
1138  		if (!wait_for_completion_timeout(&qspi->mspi_done, timeo)) {
1139  			dev_err(&qspi->pdev->dev, "timeout waiting for MSPI\n");
1140  			return -ETIMEDOUT;
1141  		}
1142  
1143  		read_from_hw(qspi, slots);
1144  	}
1145  	bcm_qspi_enable_bspi(qspi);
1146  
1147  	return 0;
1148  }
1149  
1150  static int bcm_qspi_mspi_exec_mem_op(struct spi_device *spi,
1151  				     const struct spi_mem_op *op)
1152  {
1153  	struct spi_controller *host = spi->controller;
1154  	struct bcm_qspi *qspi = spi_controller_get_devdata(host);
1155  	struct spi_transfer t[2];
1156  	u8 cmd[6] = { };
1157  	int ret, i;
1158  
1159  	memset(cmd, 0, sizeof(cmd));
1160  	memset(t, 0, sizeof(t));
1161  
1162  	/* tx */
1163  	/* opcode is in cmd[0] */
1164  	cmd[0] = op->cmd.opcode;
1165  	for (i = 0; i < op->addr.nbytes; i++)
1166  		cmd[1 + i] = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
1167  
1168  	t[0].tx_buf = cmd;
1169  	t[0].len = op->addr.nbytes + op->dummy.nbytes + 1;
1170  	t[0].bits_per_word = spi->bits_per_word;
1171  	t[0].tx_nbits = op->cmd.buswidth;
1172  	/* let MSPI know that this is not the last transfer */
1173  	qspi->trans_pos.mspi_last_trans = false;
1174  	ret = bcm_qspi_transfer_one(host, spi, &t[0]);
1175  
1176  	/* rx */
1177  	qspi->trans_pos.mspi_last_trans = true;
1178  	if (!ret) {
1179  		/* rx */
1180  		t[1].rx_buf = op->data.buf.in;
1181  		t[1].len = op->data.nbytes;
1182  		t[1].rx_nbits =  op->data.buswidth;
1183  		t[1].bits_per_word = spi->bits_per_word;
1184  		ret = bcm_qspi_transfer_one(host, spi, &t[1]);
1185  	}
1186  
1187  	return ret;
1188  }
1189  
1190  static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
1191  				const struct spi_mem_op *op)
1192  {
1193  	struct spi_device *spi = mem->spi;
1194  	struct bcm_qspi *qspi = spi_controller_get_devdata(spi->controller);
1195  	int ret = 0;
1196  	bool mspi_read = false;
1197  	u32 addr = 0, len;
1198  	u_char *buf;
1199  
1200  	if (!op->data.nbytes || !op->addr.nbytes || op->addr.nbytes > 4 ||
1201  	    op->data.dir != SPI_MEM_DATA_IN)
1202  		return -ENOTSUPP;
1203  
1204  	buf = op->data.buf.in;
1205  	addr = op->addr.val;
1206  	len = op->data.nbytes;
1207  
1208  	if (has_bspi(qspi) && bcm_qspi_bspi_ver_three(qspi) == true) {
1209  		/*
1210  		 * The address coming into this function is a raw flash offset.
1211  		 * But for BSPI <= V3, we need to convert it to a remapped BSPI
1212  		 * address. If it crosses a 4MB boundary, just revert back to
1213  		 * using MSPI.
1214  		 */
1215  		addr = (addr + 0xc00000) & 0xffffff;
1216  
1217  		if ((~ADDR_4MB_MASK & addr) ^
1218  		    (~ADDR_4MB_MASK & (addr + len - 1)))
1219  			mspi_read = true;
1220  	}
1221  
1222  	/* non-aligned and very short transfers are handled by MSPI */
1223  	if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
1224  	    len < 4 || op->cmd.opcode == SPINOR_OP_RDSFDP)
1225  		mspi_read = true;
1226  
1227  	if (!has_bspi(qspi) || mspi_read)
1228  		return bcm_qspi_mspi_exec_mem_op(spi, op);
1229  
1230  	ret = bcm_qspi_bspi_set_mode(qspi, op, 0);
1231  
1232  	if (!ret)
1233  		ret = bcm_qspi_bspi_exec_mem_op(spi, op);
1234  
1235  	return ret;
1236  }
1237  
1238  static void bcm_qspi_cleanup(struct spi_device *spi)
1239  {
1240  	struct bcm_qspi_parms *xp = spi_get_ctldata(spi);
1241  
1242  	kfree(xp);
1243  }
1244  
1245  static irqreturn_t bcm_qspi_mspi_l2_isr(int irq, void *dev_id)
1246  {
1247  	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1248  	struct bcm_qspi *qspi = qspi_dev_id->dev;
1249  	u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);
1250  
1251  	if (status & MSPI_MSPI_STATUS_SPIF) {
1252  		struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1253  		/* clear interrupt */
1254  		status &= ~MSPI_MSPI_STATUS_SPIF;
1255  		bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status);
1256  		if (qspi->soc_intc)
1257  			soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_DONE);
1258  		complete(&qspi->mspi_done);
1259  		return IRQ_HANDLED;
1260  	}
1261  
1262  	return IRQ_NONE;
1263  }
1264  
1265  static irqreturn_t bcm_qspi_bspi_lr_l2_isr(int irq, void *dev_id)
1266  {
1267  	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1268  	struct bcm_qspi *qspi = qspi_dev_id->dev;
1269  	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1270  	u32 status = qspi_dev_id->irqp->mask;
1271  
1272  	if (qspi->bspi_enabled && qspi->bspi_rf_op) {
1273  		bcm_qspi_bspi_lr_data_read(qspi);
1274  		if (qspi->bspi_rf_op_len == 0) {
1275  			qspi->bspi_rf_op = NULL;
1276  			if (qspi->soc_intc) {
1277  				/* disable soc BSPI interrupt */
1278  				soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE,
1279  							   false);
1280  				/* indicate done */
1281  				status = INTR_BSPI_LR_SESSION_DONE_MASK;
1282  			}
1283  
1284  			if (qspi->bspi_rf_op_status)
1285  				bcm_qspi_bspi_lr_clear(qspi);
1286  			else
1287  				bcm_qspi_bspi_flush_prefetch_buffers(qspi);
1288  		}
1289  
1290  		if (qspi->soc_intc)
1291  			/* clear soc BSPI interrupt */
1292  			soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_DONE);
1293  	}
1294  
1295  	status &= INTR_BSPI_LR_SESSION_DONE_MASK;
1296  	if (qspi->bspi_enabled && status && qspi->bspi_rf_op_len == 0)
1297  		complete(&qspi->bspi_done);
1298  
1299  	return IRQ_HANDLED;
1300  }
1301  
1302  static irqreturn_t bcm_qspi_bspi_lr_err_l2_isr(int irq, void *dev_id)
1303  {
1304  	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1305  	struct bcm_qspi *qspi = qspi_dev_id->dev;
1306  	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1307  
1308  	dev_err(&qspi->pdev->dev, "BSPI INT error\n");
1309  	qspi->bspi_rf_op_status = -EIO;
1310  	if (qspi->soc_intc)
1311  		/* clear soc interrupt */
1312  		soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_ERR);
1313  
1314  	complete(&qspi->bspi_done);
1315  	return IRQ_HANDLED;
1316  }
1317  
1318  static irqreturn_t bcm_qspi_l1_isr(int irq, void *dev_id)
1319  {
1320  	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1321  	struct bcm_qspi *qspi = qspi_dev_id->dev;
1322  	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1323  	irqreturn_t ret = IRQ_NONE;
1324  
1325  	if (soc_intc) {
1326  		u32 status = soc_intc->bcm_qspi_get_int_status(soc_intc);
1327  
1328  		if (status & MSPI_DONE)
1329  			ret = bcm_qspi_mspi_l2_isr(irq, dev_id);
1330  		else if (status & BSPI_DONE)
1331  			ret = bcm_qspi_bspi_lr_l2_isr(irq, dev_id);
1332  		else if (status & BSPI_ERR)
1333  			ret = bcm_qspi_bspi_lr_err_l2_isr(irq, dev_id);
1334  	}
1335  
1336  	return ret;
1337  }
1338  
1339  static const struct bcm_qspi_irq qspi_irq_tab[] = {
1340  	{
1341  		.irq_name = "spi_lr_fullness_reached",
1342  		.irq_handler = bcm_qspi_bspi_lr_l2_isr,
1343  		.mask = INTR_BSPI_LR_FULLNESS_REACHED_MASK,
1344  	},
1345  	{
1346  		.irq_name = "spi_lr_session_aborted",
1347  		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
1348  		.mask = INTR_BSPI_LR_SESSION_ABORTED_MASK,
1349  	},
1350  	{
1351  		.irq_name = "spi_lr_impatient",
1352  		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
1353  		.mask = INTR_BSPI_LR_IMPATIENT_MASK,
1354  	},
1355  	{
1356  		.irq_name = "spi_lr_session_done",
1357  		.irq_handler = bcm_qspi_bspi_lr_l2_isr,
1358  		.mask = INTR_BSPI_LR_SESSION_DONE_MASK,
1359  	},
1360  #ifdef QSPI_INT_DEBUG
1361  	/* this interrupt is for debug purposes only, don't request irq */
1362  	{
1363  		.irq_name = "spi_lr_overread",
1364  		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
1365  		.mask = INTR_BSPI_LR_OVERREAD_MASK,
1366  	},
1367  #endif
1368  	{
1369  		.irq_name = "mspi_done",
1370  		.irq_handler = bcm_qspi_mspi_l2_isr,
1371  		.mask = INTR_MSPI_DONE_MASK,
1372  	},
1373  	{
1374  		.irq_name = "mspi_halted",
1375  		.irq_handler = bcm_qspi_mspi_l2_isr,
1376  		.mask = INTR_MSPI_HALTED_MASK,
1377  	},
1378  	{
1379  		/* single muxed L1 interrupt source */
1380  		.irq_name = "spi_l1_intr",
1381  		.irq_handler = bcm_qspi_l1_isr,
1382  		.irq_source = MUXED_L1,
1383  		.mask = QSPI_INTERRUPTS_ALL,
1384  	},
1385  };
1386  
1387  static void bcm_qspi_bspi_init(struct bcm_qspi *qspi)
1388  {
1389  	u32 val = 0;
1390  
1391  	val = bcm_qspi_read(qspi, BSPI, BSPI_REVISION_ID);
1392  	qspi->bspi_maj_rev = (val >> 8) & 0xff;
1393  	qspi->bspi_min_rev = val & 0xff;
1394  	if (!(bcm_qspi_bspi_ver_three(qspi))) {
1395  		/* Force mapping of BSPI address -> flash offset */
1396  		bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_VALUE, 0);
1397  		bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_ENABLE, 1);
1398  	}
1399  	qspi->bspi_enabled = 1;
1400  	bcm_qspi_disable_bspi(qspi);
1401  	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
1402  	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
1403  }
1404  
1405  static void bcm_qspi_hw_init(struct bcm_qspi *qspi)
1406  {
1407  	struct bcm_qspi_parms parms;
1408  
1409  	bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 0);
1410  	bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_MSB, 0);
1411  	bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
1412  	bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, 0);
1413  	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0x20);
1414  
1415  	parms.mode = SPI_MODE_3;
1416  	parms.bits_per_word = 8;
1417  	parms.speed_hz = qspi->max_speed_hz;
1418  	bcm_qspi_hw_set_parms(qspi, &parms);
1419  
1420  	if (has_bspi(qspi))
1421  		bcm_qspi_bspi_init(qspi);
1422  }
1423  
1424  static void bcm_qspi_hw_uninit(struct bcm_qspi *qspi)
1425  {
1426  	u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);
1427  
1428  	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0);
1429  	if (has_bspi(qspi))
1430  		bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
1431  
1432  	/* clear interrupt */
1433  	bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status & ~1);
1434  }
1435  
1436  static const struct spi_controller_mem_ops bcm_qspi_mem_ops = {
1437  	.exec_op = bcm_qspi_exec_mem_op,
1438  };
1439  
1440  struct bcm_qspi_data {
1441  	bool	has_mspi_rev;
1442  	bool	has_spcr3_sysclk;
1443  };
1444  
1445  static const struct bcm_qspi_data bcm_qspi_no_rev_data = {
1446  	.has_mspi_rev	= false,
1447  	.has_spcr3_sysclk = false,
1448  };
1449  
1450  static const struct bcm_qspi_data bcm_qspi_rev_data = {
1451  	.has_mspi_rev	= true,
1452  	.has_spcr3_sysclk = false,
1453  };
1454  
1455  static const struct bcm_qspi_data bcm_qspi_spcr3_data = {
1456  	.has_mspi_rev	= true,
1457  	.has_spcr3_sysclk = true,
1458  };
1459  
1460  static const struct of_device_id bcm_qspi_of_match[] __maybe_unused = {
1461  	{
1462  		.compatible = "brcm,spi-bcm7445-qspi",
1463  		.data = &bcm_qspi_rev_data,
1464  
1465  	},
1466  	{
1467  		.compatible = "brcm,spi-bcm-qspi",
1468  		.data = &bcm_qspi_no_rev_data,
1469  	},
1470  	{
1471  		.compatible = "brcm,spi-bcm7216-qspi",
1472  		.data = &bcm_qspi_spcr3_data,
1473  	},
1474  	{
1475  		.compatible = "brcm,spi-bcm7278-qspi",
1476  		.data = &bcm_qspi_spcr3_data,
1477  	},
1478  	{},
1479  };
1480  MODULE_DEVICE_TABLE(of, bcm_qspi_of_match);
1481  
1482  int bcm_qspi_probe(struct platform_device *pdev,
1483  		   struct bcm_qspi_soc_intc *soc_intc)
1484  {
1485  	const struct of_device_id *of_id = NULL;
1486  	const struct bcm_qspi_data *data;
1487  	struct device *dev = &pdev->dev;
1488  	struct bcm_qspi *qspi;
1489  	struct spi_controller *host;
1490  	struct resource *res;
1491  	int irq, ret = 0, num_ints = 0;
1492  	u32 val;
1493  	u32 rev = 0;
1494  	const char *name = NULL;
1495  	int num_irqs = ARRAY_SIZE(qspi_irq_tab);
1496  
1497  	/* We only support device-tree instantiation */
1498  	if (!dev->of_node)
1499  		return -ENODEV;
1500  
1501  	of_id = of_match_node(bcm_qspi_of_match, dev->of_node);
1502  	if (!of_id)
1503  		return -ENODEV;
1504  
1505  	data = of_id->data;
1506  
1507  	host = devm_spi_alloc_host(dev, sizeof(struct bcm_qspi));
1508  	if (!host) {
1509  		dev_err(dev, "error allocating spi_controller\n");
1510  		return -ENOMEM;
1511  	}
1512  
1513  	qspi = spi_controller_get_devdata(host);
1514  
1515  	qspi->clk = devm_clk_get_optional(&pdev->dev, NULL);
1516  	if (IS_ERR(qspi->clk))
1517  		return PTR_ERR(qspi->clk);
1518  
1519  	qspi->pdev = pdev;
1520  	qspi->trans_pos.trans = NULL;
1521  	qspi->trans_pos.byte = 0;
1522  	qspi->trans_pos.mspi_last_trans = true;
1523  	qspi->host = host;
1524  
1525  	host->bus_num = -1;
1526  	host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD |
1527  				SPI_3WIRE;
1528  	host->setup = bcm_qspi_setup;
1529  	host->transfer_one = bcm_qspi_transfer_one;
1530  	host->mem_ops = &bcm_qspi_mem_ops;
1531  	host->cleanup = bcm_qspi_cleanup;
1532  	host->dev.of_node = dev->of_node;
1533  	host->num_chipselect = NUM_CHIPSELECT;
1534  	host->use_gpio_descriptors = true;
1535  
1536  	qspi->big_endian = of_device_is_big_endian(dev->of_node);
1537  
1538  	if (!of_property_read_u32(dev->of_node, "num-cs", &val))
1539  		host->num_chipselect = val;
1540  
1541  	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hif_mspi");
1542  	if (!res)
1543  		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1544  						   "mspi");
1545  
1546  	qspi->base[MSPI]  = devm_ioremap_resource(dev, res);
1547  	if (IS_ERR(qspi->base[MSPI]))
1548  		return PTR_ERR(qspi->base[MSPI]);
1549  
1550  	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
1551  	if (res) {
1552  		qspi->base[BSPI]  = devm_ioremap_resource(dev, res);
1553  		if (IS_ERR(qspi->base[BSPI]))
1554  			return PTR_ERR(qspi->base[BSPI]);
1555  		qspi->bspi_mode = true;
1556  	} else {
1557  		qspi->bspi_mode = false;
1558  	}
1559  
1560  	dev_info(dev, "using %smspi mode\n", qspi->bspi_mode ? "bspi-" : "");
1561  
1562  	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg");
1563  	if (res) {
1564  		qspi->base[CHIP_SELECT]  = devm_ioremap_resource(dev, res);
1565  		if (IS_ERR(qspi->base[CHIP_SELECT]))
1566  			return PTR_ERR(qspi->base[CHIP_SELECT]);
1567  	}
1568  
1569  	qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id),
1570  				GFP_KERNEL);
1571  	if (!qspi->dev_ids)
1572  		return -ENOMEM;
1573  
1574  	/*
1575  	 * Some SoCs integrate the SPI controller (e.g., its interrupt bits)
1576  	 * in SoC-specific ways
1577  	 */
1578  	if (soc_intc) {
1579  		qspi->soc_intc = soc_intc;
1580  		soc_intc->bcm_qspi_int_set(soc_intc, MSPI_DONE, true);
1581  	} else {
1582  		qspi->soc_intc = NULL;
1583  	}
1584  
1585  	if (qspi->clk) {
1586  		ret = clk_prepare_enable(qspi->clk);
1587  		if (ret) {
1588  			dev_err(dev, "failed to prepare clock\n");
1589  			goto qspi_probe_err;
1590  		}
1591  		qspi->base_clk = clk_get_rate(qspi->clk);
1592  	} else {
1593  		qspi->base_clk = MSPI_BASE_FREQ;
1594  	}
1595  
1596  	if (data->has_mspi_rev) {
1597  		rev = bcm_qspi_read(qspi, MSPI, MSPI_REV);
1598  		/* some older revs do not have a MSPI_REV register */
1599  		if ((rev & 0xff) == 0xff)
1600  			rev = 0;
1601  	}
1602  
1603  	qspi->mspi_maj_rev = (rev >> 4) & 0xf;
1604  	qspi->mspi_min_rev = rev & 0xf;
1605  	qspi->mspi_spcr3_sysclk = data->has_spcr3_sysclk;
1606  
1607  	qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2);
1608  
1609  	/*
1610  	 * On SW resets it is possible to have the mask still enabled.
1611  	 * Disable the mask and clear the status while we initialize.
1612  	 */
1613  	bcm_qspi_hw_uninit(qspi);
1614  
1615  	for (val = 0; val < num_irqs; val++) {
1616  		irq = -1;
1617  		name = qspi_irq_tab[val].irq_name;
1618  		if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
1619  			/* get the l2 interrupts */
1620  			irq = platform_get_irq_byname_optional(pdev, name);
1621  		} else if (!num_ints && soc_intc) {
1622  			/* all mspi, bspi intrs muxed to one L1 intr */
1623  			irq = platform_get_irq(pdev, 0);
1624  		}
1625  
1626  		if (irq  >= 0) {
1627  			ret = devm_request_irq(&pdev->dev, irq,
1628  					       qspi_irq_tab[val].irq_handler, 0,
1629  					       name,
1630  					       &qspi->dev_ids[val]);
1631  			if (ret < 0) {
1632  				dev_err(&pdev->dev, "IRQ %s not found\n", name);
1633  				goto qspi_unprepare_err;
1634  			}
1635  
1636  			qspi->dev_ids[val].dev = qspi;
1637  			qspi->dev_ids[val].irqp = &qspi_irq_tab[val];
1638  			num_ints++;
1639  			dev_dbg(&pdev->dev, "registered IRQ %s %d\n",
1640  				qspi_irq_tab[val].irq_name,
1641  				irq);
1642  		}
1643  	}
1644  
1645  	if (!num_ints) {
1646  		dev_err(&pdev->dev, "no IRQs registered, cannot init driver\n");
1647  		ret = -EINVAL;
1648  		goto qspi_unprepare_err;
1649  	}
1650  
1651  	bcm_qspi_hw_init(qspi);
1652  	init_completion(&qspi->mspi_done);
1653  	init_completion(&qspi->bspi_done);
1654  	qspi->curr_cs = -1;
1655  
1656  	platform_set_drvdata(pdev, qspi);
1657  
1658  	qspi->xfer_mode.width = -1;
1659  	qspi->xfer_mode.addrlen = -1;
1660  	qspi->xfer_mode.hp = -1;
1661  
1662  	ret = spi_register_controller(host);
1663  	if (ret < 0) {
1664  		dev_err(dev, "can't register host\n");
1665  		goto qspi_reg_err;
1666  	}
1667  
1668  	return 0;
1669  
1670  qspi_reg_err:
1671  	bcm_qspi_hw_uninit(qspi);
1672  qspi_unprepare_err:
1673  	clk_disable_unprepare(qspi->clk);
1674  qspi_probe_err:
1675  	kfree(qspi->dev_ids);
1676  	return ret;
1677  }
1678  /* probe function to be called by SoC specific platform driver probe */
1679  EXPORT_SYMBOL_GPL(bcm_qspi_probe);
1680  
1681  void bcm_qspi_remove(struct platform_device *pdev)
1682  {
1683  	struct bcm_qspi *qspi = platform_get_drvdata(pdev);
1684  
1685  	spi_unregister_controller(qspi->host);
1686  	bcm_qspi_hw_uninit(qspi);
1687  	clk_disable_unprepare(qspi->clk);
1688  	kfree(qspi->dev_ids);
1689  }
1690  
1691  /* function to be called by SoC specific platform driver remove() */
1692  EXPORT_SYMBOL_GPL(bcm_qspi_remove);
1693  
1694  static int __maybe_unused bcm_qspi_suspend(struct device *dev)
1695  {
1696  	struct bcm_qspi *qspi = dev_get_drvdata(dev);
1697  
1698  	/* store the override strap value */
1699  	if (!bcm_qspi_bspi_ver_three(qspi))
1700  		qspi->s3_strap_override_ctrl =
1701  			bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
1702  
1703  	spi_controller_suspend(qspi->host);
1704  	clk_disable_unprepare(qspi->clk);
1705  	bcm_qspi_hw_uninit(qspi);
1706  
1707  	return 0;
1708  }
1709  
1710  static int __maybe_unused bcm_qspi_resume(struct device *dev)
1711  {
1712  	struct bcm_qspi *qspi = dev_get_drvdata(dev);
1713  	int ret = 0;
1714  
1715  	bcm_qspi_hw_init(qspi);
1716  	bcm_qspi_chip_select(qspi, qspi->curr_cs);
1717  	if (qspi->soc_intc)
1718  		/* enable MSPI interrupt */
1719  		qspi->soc_intc->bcm_qspi_int_set(qspi->soc_intc, MSPI_DONE,
1720  						 true);
1721  
1722  	ret = clk_prepare_enable(qspi->clk);
1723  	if (!ret)
1724  		spi_controller_resume(qspi->host);
1725  
1726  	return ret;
1727  }
1728  
1729  SIMPLE_DEV_PM_OPS(bcm_qspi_pm_ops, bcm_qspi_suspend, bcm_qspi_resume);
1730  
1731  /* pm_ops to be called by SoC specific platform driver */
1732  EXPORT_SYMBOL_GPL(bcm_qspi_pm_ops);
1733  
1734  MODULE_AUTHOR("Kamal Dasu");
1735  MODULE_DESCRIPTION("Broadcom QSPI driver");
1736  MODULE_LICENSE("GPL v2");
1737  MODULE_ALIAS("platform:" DRIVER_NAME);
1738