// SPDX-License-Identifier: GPL-2.0+

/*
 * Freescale QuadSPI driver.
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Copyright (C) 2018 Bootlin
 * Copyright (C) 2018 exceet electronics GmbH
 * Copyright (C) 2018 Kontron Electronics GmbH
 *
 * Transition to SPI MEM interface:
 * Authors:
 *     Boris Brezillon <bbrezillon@kernel.org>
 *     Frieder Schrempf <frieder.schrempf@kontron.de>
 *     Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
 *     Suresh Gupta <suresh.gupta@nxp.com>
 *
 * Based on the original fsl-quadspi.c spi-nor driver:
 * Author: Freescale Semiconductor, Inc.
 *
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/sizes.h>

#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

/*
 * The driver only uses a single LUT entry, which is updated on
 * each call of exec_op(). Index 0 is preset at boot with a basic
 * read operation, so let's use the last entry (15).
 */
#define SEQID_LUT                       15

/* Registers used by the driver */
#define QUADSPI_MCR                     0x00
#define QUADSPI_MCR_RESERVED_MASK       GENMASK(19, 16)
#define QUADSPI_MCR_MDIS_MASK           BIT(14)
#define QUADSPI_MCR_CLR_TXF_MASK        BIT(11)
#define QUADSPI_MCR_CLR_RXF_MASK        BIT(10)
#define QUADSPI_MCR_DDR_EN_MASK         BIT(7)
#define QUADSPI_MCR_END_CFG_MASK        GENMASK(3, 2)
#define QUADSPI_MCR_SWRSTHD_MASK        BIT(1)
#define QUADSPI_MCR_SWRSTSD_MASK        BIT(0)

#define QUADSPI_IPCR                    0x08
#define QUADSPI_IPCR_SEQID(x)           ((x) << 24)

#define QUADSPI_FLSHCR                  0x0c
#define QUADSPI_FLSHCR_TCSS_MASK        GENMASK(3, 0)
#define QUADSPI_FLSHCR_TCSH_MASK        GENMASK(11, 8)
#define QUADSPI_FLSHCR_TDH_MASK         GENMASK(17, 16)

#define QUADSPI_BUF0CR                  0x10
#define QUADSPI_BUF1CR                  0x14
#define QUADSPI_BUF2CR                  0x18
#define QUADSPI_BUFXCR_INVALID_MSTRID   0xe

#define QUADSPI_BUF3CR                  0x1c
#define QUADSPI_BUF3CR_ALLMST_MASK      BIT(31)
#define QUADSPI_BUF3CR_ADATSZ(x)        ((x) << 8)
#define QUADSPI_BUF3CR_ADATSZ_MASK      GENMASK(15, 8)

#define QUADSPI_BFGENCR                 0x20
#define QUADSPI_BFGENCR_SEQID(x)        ((x) << 12)

#define QUADSPI_BUF0IND                 0x30
#define QUADSPI_BUF1IND                 0x34
#define QUADSPI_BUF2IND                 0x38
#define QUADSPI_SFAR                    0x100

#define QUADSPI_SMPR                    0x108
#define QUADSPI_SMPR_DDRSMP_MASK        GENMASK(18, 16)
#define QUADSPI_SMPR_FSDLY_MASK         BIT(6)
#define QUADSPI_SMPR_FSPHS_MASK         BIT(5)
#define QUADSPI_SMPR_HSENA_MASK         BIT(0)

#define QUADSPI_RBCT                    0x110
#define QUADSPI_RBCT_WMRK_MASK          GENMASK(4, 0)
#define QUADSPI_RBCT_RXBRD_USEIPS       BIT(8)

#define QUADSPI_TBDR                    0x154

#define QUADSPI_SR                      0x15c
#define QUADSPI_SR_IP_ACC_MASK          BIT(1)
#define QUADSPI_SR_AHB_ACC_MASK         BIT(2)

#define QUADSPI_FR                      0x160
#define QUADSPI_FR_TFF_MASK             BIT(0)

#define QUADSPI_RSER                    0x164
#define QUADSPI_RSER_TFIE               BIT(0)

#define QUADSPI_SPTRCLR                 0x16c
#define QUADSPI_SPTRCLR_IPPTRC          BIT(8)
#define QUADSPI_SPTRCLR_BFPTRC          BIT(0)

#define QUADSPI_SFA1AD                  0x180
#define QUADSPI_SFA2AD                  0x184
#define QUADSPI_SFB1AD                  0x188
#define QUADSPI_SFB2AD                  0x18c
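
/*
 * RX buffer data registers: fsl_qspi_read_rxfifo() reads IP-command data one
 * 32-bit word at a time from QUADSPI_RBDR(0..n).
 */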
#define QUADSPI_RBDR(x)                 (0x200 + ((x) * 4))

#define QUADSPI_LUTKEY                  0x300
#define QUADSPI_LUTKEY_VALUE            0x5AF05AF0

#define QUADSPI_LCKCR                   0x304
#define QUADSPI_LCKER_LOCK              BIT(0)
#define QUADSPI_LCKER_UNLOCK            BIT(1)

#define QUADSPI_LUT_BASE                0x310
#define QUADSPI_LUT_OFFSET              (SEQID_LUT * 4 * 4)
#define QUADSPI_LUT_REG(idx) \
        (QUADSPI_LUT_BASE + QUADSPI_LUT_OFFSET + (idx) * 4)

/* Instruction set for the LUT register */
#define LUT_STOP                        0
#define LUT_CMD                         1
#define LUT_ADDR                        2
#define LUT_DUMMY                       3
#define LUT_MODE                        4
#define LUT_MODE2                       5
#define LUT_MODE4                       6
#define LUT_FSL_READ                    7
#define LUT_FSL_WRITE                   8
#define LUT_JMP_ON_CS                   9
#define LUT_ADDR_DDR                    10
#define LUT_MODE_DDR                    11
#define LUT_MODE2_DDR                   12
#define LUT_MODE4_DDR                   13
#define LUT_FSL_READ_DDR                14
#define LUT_FSL_WRITE_DDR               15
#define LUT_DATA_LEARN                  16

/*
 * The PAD definitions for LUT register.
 *
 * The pad stands for the number of IO lines [0:3].
 * For example, the quad read needs four IO lines,
 * so you should use LUT_PAD(4).
 */
#define LUT_PAD(x)                      (fls(x) - 1)

/*
 * Macro for constructing the LUT entries with the following
 * register layout:
 *
 *  ---------------------------------------------------
 *  | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
 *  ---------------------------------------------------
 */
#define LUT_DEF(idx, ins, pad, opr) \
        ((((ins) << 10) | ((pad) << 8) | (opr)) << (((idx) % 2) * 16))

/* Controller needs driver to swap endianness */
#define QUADSPI_QUIRK_SWAP_ENDIAN       BIT(0)

/* Controller needs 4x internal clock */
#define QUADSPI_QUIRK_4X_INT_CLK        BIT(1)

/*
 * TKT253890, the controller needs the driver to fill the txfifo with
 * 16 bytes at least to trigger a data transfer, even though the extra
 * data won't be transferred.
 */
#define QUADSPI_QUIRK_TKT253890         BIT(2)

/* TKT245618, the controller cannot wake up from wait mode */
#define QUADSPI_QUIRK_TKT245618         BIT(3)

/*
 * Controller adds QSPI_AMBA_BASE (base address of the mapped memory)
 * internally. No need to add it when setting SFXXAD and SFAR registers
 */
#define QUADSPI_QUIRK_BASE_INTERNAL     BIT(4)

/*
 * Controller uses TDH bits in register QUADSPI_FLSHCR.
 * They need to be set in accordance with the DDR/SDR mode.
 */
#define QUADSPI_QUIRK_USE_TDH_SETTING   BIT(5)

struct fsl_qspi_devtype_data {
        unsigned int rxfifo;
        unsigned int txfifo;
        int invalid_mstrid;
        unsigned int ahb_buf_size;
        unsigned int quirks;
        bool little_endian;
};

static const struct fsl_qspi_devtype_data vybrid_data = {
        .rxfifo = SZ_128,
        .txfifo = SZ_64,
        .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
        .ahb_buf_size = SZ_1K,
        .quirks = QUADSPI_QUIRK_SWAP_ENDIAN,
        .little_endian = true,
};

static const struct fsl_qspi_devtype_data imx6sx_data = {
        .rxfifo = SZ_128,
        .txfifo = SZ_512,
        .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
        .ahb_buf_size = SZ_1K,
        .quirks = QUADSPI_QUIRK_4X_INT_CLK | QUADSPI_QUIRK_TKT245618,
        .little_endian = true,
};

static const struct fsl_qspi_devtype_data imx7d_data = {
        .rxfifo = SZ_128,
        .txfifo = SZ_512,
        .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
        .ahb_buf_size = SZ_1K,
        .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
                  QUADSPI_QUIRK_USE_TDH_SETTING,
        .little_endian = true,
};

static const struct fsl_qspi_devtype_data imx6ul_data = {
        .rxfifo = SZ_128,
        .txfifo = SZ_512,
        .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
        .ahb_buf_size = SZ_1K,
        .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
                  QUADSPI_QUIRK_USE_TDH_SETTING,
        .little_endian = true,
};

static const struct fsl_qspi_devtype_data ls1021a_data = {
        .rxfifo = SZ_128,
        .txfifo = SZ_64,
        .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
        .ahb_buf_size = SZ_1K,
        .quirks = 0,
        .little_endian = false,
};

static const struct fsl_qspi_devtype_data ls2080a_data = {
        .rxfifo = SZ_128,
        .txfifo = SZ_64,
        .ahb_buf_size = SZ_1K,
        .invalid_mstrid = 0x0,
        .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_BASE_INTERNAL,
        .little_endian = true,
};

struct fsl_qspi {
        void __iomem *iobase;
        void __iomem *ahb_addr;
        u32 memmap_phy;
        struct clk *clk, *clk_en;
        struct device *dev;
        struct completion c;
        const struct fsl_qspi_devtype_data *devtype_data;
        struct mutex lock;
        struct pm_qos_request pm_qos_req;
        int selected;
};

static inline int needs_swap_endian(struct fsl_qspi *q)
{
        return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN;
}

static inline int needs_4x_clock(struct fsl_qspi *q)
{
        return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK;
}

static inline int needs_fill_txfifo(struct fsl_qspi *q)
{
        return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890;
}

static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
{
        return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618;
}

static inline int needs_amba_base_offset(struct fsl_qspi *q)
{
        return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
}

static inline int needs_tdh_setting(struct fsl_qspi *q)
{
        return q->devtype_data->quirks & QUADSPI_QUIRK_USE_TDH_SETTING;
}

/*
 * An IC bug makes it necessary to rearrange the 32-bit data.
 * Later chips, such as IMX6SLX, have fixed this bug.
 */
static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
{
        return needs_swap_endian(q) ? __swab32(a) : a;
}

/*
 * R/W functions for big- or little-endian registers:
 * The QSPI controller's endianness is independent of
 * the CPU core's endianness. So far, although the CPU
 * core is little-endian, the QSPI controller can use
 * either big-endian or little-endian.
 */
static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
{
        if (q->devtype_data->little_endian)
                iowrite32(val, addr);
        else
                iowrite32be(val, addr);
}

static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
{
        if (q->devtype_data->little_endian)
                return ioread32(addr);

        return ioread32be(addr);
}

static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
{
        struct fsl_qspi *q = dev_id;
        u32 reg;

        /* clear interrupt */
        reg = qspi_readl(q, q->iobase + QUADSPI_FR);
        qspi_writel(q, reg, q->iobase + QUADSPI_FR);

        if (reg & QUADSPI_FR_TFF_MASK)
                complete(&q->c);

        dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", 0, reg);
        return IRQ_HANDLED;
}

static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width)
{
        switch (width) {
        case 1:
        case 2:
        case 4:
                return 0;
        }

        return -ENOTSUPP;
}

static bool fsl_qspi_supports_op(struct spi_mem *mem,
                                 const struct spi_mem_op *op)
{
        struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
        int ret;

        ret = fsl_qspi_check_buswidth(q, op->cmd.buswidth);

        if (op->addr.nbytes)
                ret |= fsl_qspi_check_buswidth(q, op->addr.buswidth);

        if (op->dummy.nbytes)
                ret |= fsl_qspi_check_buswidth(q, op->dummy.buswidth);

        if (op->data.nbytes)
                ret |= fsl_qspi_check_buswidth(q, op->data.buswidth);

        if (ret)
                return false;

        /*
         * The number of instructions needed for the op needs
         * to fit into a single LUT entry.
         */
        if (op->addr.nbytes +
            (op->dummy.nbytes ? 1 : 0) +
            (op->data.nbytes ? 1 : 0) > 6)
                return false;

        /* Max 64 dummy clock cycles supported */
        if (op->dummy.nbytes &&
            (op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
                return false;

        /* Max data length, check controller limits and alignment */
        if (op->data.dir == SPI_MEM_DATA_IN &&
            (op->data.nbytes > q->devtype_data->ahb_buf_size ||
             (op->data.nbytes > q->devtype_data->rxfifo - 4 &&
              !IS_ALIGNED(op->data.nbytes, 8))))
                return false;

        if (op->data.dir == SPI_MEM_DATA_OUT &&
            op->data.nbytes > q->devtype_data->txfifo)
                return false;

        return spi_mem_default_supports_op(mem, op);
}

static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
                                 const struct spi_mem_op *op)
{
        void __iomem *base = q->iobase;
        u32 lutval[4] = {};
        int lutidx = 1, i;

        lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
                             op->cmd.opcode);

        /*
         * For some unknown reason, using LUT_ADDR doesn't work in some
         * cases (at least with only one byte long addresses), so
         * let's use LUT_MODE to write the address bytes one by one.
         */
        for (i = 0; i < op->addr.nbytes; i++) {
                u8 addrbyte = op->addr.val >> (8 * (op->addr.nbytes - i - 1));

                lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_MODE,
                                              LUT_PAD(op->addr.buswidth),
                                              addrbyte);
                lutidx++;
        }

        if (op->dummy.nbytes) {
                lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
                                              LUT_PAD(op->dummy.buswidth),
                                              op->dummy.nbytes * 8 /
                                              op->dummy.buswidth);
                lutidx++;
        }

        if (op->data.nbytes) {
                lutval[lutidx / 2] |= LUT_DEF(lutidx,
                                              op->data.dir == SPI_MEM_DATA_IN ?
                                              LUT_FSL_READ : LUT_FSL_WRITE,
                                              LUT_PAD(op->data.buswidth),
                                              0);
                lutidx++;
        }

        lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);

        /* unlock LUT */
        qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
        qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);

        /* fill LUT */
        for (i = 0; i < ARRAY_SIZE(lutval); i++)
                qspi_writel(q, lutval[i], base + QUADSPI_LUT_REG(i));

        /* lock LUT */
        qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
        qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
}

static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
{
        int ret;

        ret = clk_prepare_enable(q->clk_en);
        if (ret)
                return ret;

        ret = clk_prepare_enable(q->clk);
        if (ret) {
                clk_disable_unprepare(q->clk_en);
                return ret;
        }

        if (needs_wakeup_wait_mode(q))
                cpu_latency_qos_add_request(&q->pm_qos_req, 0);

        return 0;
}

static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
{
        if (needs_wakeup_wait_mode(q))
                cpu_latency_qos_remove_request(&q->pm_qos_req);

        clk_disable_unprepare(q->clk);
        clk_disable_unprepare(q->clk_en);
}

/*
 * If we have changed the content of the flash by writing or erasing, or if we
 * read from flash with a different offset into the page buffer, we need to
 * invalidate the AHB buffer. If we do not do so, we may read out the wrong
 * data. The spec tells us to reset the AHB domain and Serial Flash domain at
 * the same time.
 */
static void fsl_qspi_invalidate(struct fsl_qspi *q)
{
        u32 reg;

        reg = qspi_readl(q, q->iobase + QUADSPI_MCR);
        reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
        qspi_writel(q, reg, q->iobase + QUADSPI_MCR);

        /*
         * The minimum delay: 1 AHB + 2 SFCK clocks.
         * Delay 1 us is enough.
         */
        udelay(1);

        reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
        qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
}

static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi)
{
        unsigned long rate = spi->max_speed_hz;
        int ret;

        if (q->selected == spi->chip_select)
                return;

        if (needs_4x_clock(q))
                rate *= 4;

        fsl_qspi_clk_disable_unprep(q);

        ret = clk_set_rate(q->clk, rate);
        if (ret)
                return;

        ret = fsl_qspi_clk_prep_enable(q);
        if (ret)
                return;

        q->selected = spi->chip_select;

        fsl_qspi_invalidate(q);
}

static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op)
{
        memcpy_fromio(op->data.buf.in,
                      q->ahb_addr + q->selected * q->devtype_data->ahb_buf_size,
                      op->data.nbytes);
}

static void fsl_qspi_fill_txfifo(struct fsl_qspi *q,
                                 const struct spi_mem_op *op)
{
        void __iomem *base = q->iobase;
        int i;
        u32 val;

        for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
                memcpy(&val, op->data.buf.out + i, 4);
                val = fsl_qspi_endian_xchg(q, val);
                qspi_writel(q, val, base + QUADSPI_TBDR);
        }

        if (i < op->data.nbytes) {
                memcpy(&val, op->data.buf.out + i, op->data.nbytes - i);
                val = fsl_qspi_endian_xchg(q, val);
                qspi_writel(q, val, base + QUADSPI_TBDR);
        }

        if (needs_fill_txfifo(q)) {
                for (i = op->data.nbytes; i < 16; i += 4)
                        qspi_writel(q, 0, base + QUADSPI_TBDR);
        }
}

static void fsl_qspi_read_rxfifo(struct fsl_qspi *q,
                                 const struct spi_mem_op *op)
{
        void __iomem *base = q->iobase;
        int i;
        u8 *buf = op->data.buf.in;
        u32 val;

        for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
                val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
                val = fsl_qspi_endian_xchg(q, val);
                memcpy(buf + i, &val, 4);
        }

        if (i < op->data.nbytes) {
                val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
                val = fsl_qspi_endian_xchg(q, val);
                memcpy(buf + i, &val, op->data.nbytes - i);
        }
}

static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op)
{
        void __iomem *base = q->iobase;
        int err = 0;

        init_completion(&q->c);

        /*
         * Always start the sequence at the same index since we update
         * the LUT at each exec_op() call. Also specify the DATA
         * length, since it has not been specified in the LUT.
         */
        qspi_writel(q, op->data.nbytes | QUADSPI_IPCR_SEQID(SEQID_LUT),
                    base + QUADSPI_IPCR);

        /* Wait for the interrupt. */
        if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000)))
                err = -ETIMEDOUT;

        if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
                fsl_qspi_read_rxfifo(q, op);

        return err;
}

static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base,
                                    u32 mask, u32 delay_us, u32 timeout_us)
{
        u32 reg;

        if (!q->devtype_data->little_endian)
                mask = (u32)cpu_to_be32(mask);

        return readl_poll_timeout(base, reg, !(reg & mask), delay_us,
                                  timeout_us);
}

static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
        struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
        void __iomem *base = q->iobase;
        u32 addr_offset = 0;
        int err = 0;
        int invalid_mstrid = q->devtype_data->invalid_mstrid;

        mutex_lock(&q->lock);

        /* Wait for the controller to be ready */
        fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK |
                                 QUADSPI_SR_AHB_ACC_MASK), 10, 1000);

        fsl_qspi_select_mem(q, mem->spi);

        if (needs_amba_base_offset(q))
                addr_offset = q->memmap_phy;

        qspi_writel(q,
                    q->selected * q->devtype_data->ahb_buf_size + addr_offset,
                    base + QUADSPI_SFAR);

        qspi_writel(q, qspi_readl(q, base + QUADSPI_MCR) |
                    QUADSPI_MCR_CLR_RXF_MASK | QUADSPI_MCR_CLR_TXF_MASK,
                    base + QUADSPI_MCR);

        qspi_writel(q, QUADSPI_SPTRCLR_BFPTRC | QUADSPI_SPTRCLR_IPPTRC,
                    base + QUADSPI_SPTRCLR);

        qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF0CR);
        qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF1CR);
        qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF2CR);

        fsl_qspi_prepare_lut(q, op);

        /*
         * If we have large chunks of data, we read them through the AHB bus
         * by accessing the mapped memory. In all other cases we use
         * IP commands to access the flash.
         */
        if (op->data.nbytes > (q->devtype_data->rxfifo - 4) &&
            op->data.dir == SPI_MEM_DATA_IN) {
                fsl_qspi_read_ahb(q, op);
        } else {
                qspi_writel(q, QUADSPI_RBCT_WMRK_MASK |
                            QUADSPI_RBCT_RXBRD_USEIPS, base + QUADSPI_RBCT);

                if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
                        fsl_qspi_fill_txfifo(q, op);

                err = fsl_qspi_do_op(q, op);
        }

        /* Invalidate the data in the AHB buffer. */
        fsl_qspi_invalidate(q);

        mutex_unlock(&q->lock);

        return err;
}

static int fsl_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
        struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);

        if (op->data.dir == SPI_MEM_DATA_OUT) {
                if (op->data.nbytes > q->devtype_data->txfifo)
                        op->data.nbytes = q->devtype_data->txfifo;
        } else {
                if (op->data.nbytes > q->devtype_data->ahb_buf_size)
                        op->data.nbytes = q->devtype_data->ahb_buf_size;
                else if (op->data.nbytes > (q->devtype_data->rxfifo - 4))
                        op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8);
        }

        return 0;
}

static int fsl_qspi_default_setup(struct fsl_qspi *q)
{
        void __iomem *base = q->iobase;
        u32 reg, addr_offset = 0;
        int ret;

        /* disable and unprepare clock to avoid glitch passing to controller */
        fsl_qspi_clk_disable_unprep(q);

        /* Set the default frequency; we will change it later if necessary. */
        ret = clk_set_rate(q->clk, 66000000);
        if (ret)
                return ret;

        ret = fsl_qspi_clk_prep_enable(q);
        if (ret)
                return ret;

        /* Reset the module */
        qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
                    base + QUADSPI_MCR);
        udelay(1);

        /* Disable the module */
        qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
                    base + QUADSPI_MCR);

        /*
         * Previous boot stages (BootROM, bootloader) might have used DDR
         * mode and did not clear the TDH bits. As we currently use SDR mode
         * only, clear the TDH bits if necessary.
         */
        if (needs_tdh_setting(q))
                qspi_writel(q, qspi_readl(q, base + QUADSPI_FLSHCR) &
                            ~QUADSPI_FLSHCR_TDH_MASK,
                            base + QUADSPI_FLSHCR);

        reg = qspi_readl(q, base + QUADSPI_SMPR);
        qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
                        | QUADSPI_SMPR_FSPHS_MASK
                        | QUADSPI_SMPR_HSENA_MASK
                        | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);

        /* We only use the buffer3 for AHB read */
        qspi_writel(q, 0, base + QUADSPI_BUF0IND);
        qspi_writel(q, 0, base + QUADSPI_BUF1IND);
        qspi_writel(q, 0, base + QUADSPI_BUF2IND);

        qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT),
                    q->iobase + QUADSPI_BFGENCR);
        qspi_writel(q, QUADSPI_RBCT_WMRK_MASK, base + QUADSPI_RBCT);
        qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
                    QUADSPI_BUF3CR_ADATSZ(q->devtype_data->ahb_buf_size / 8),
                    base + QUADSPI_BUF3CR);

        if (needs_amba_base_offset(q))
                addr_offset = q->memmap_phy;

        /*
         * In HW there can be a maximum of four chips on two buses with
         * two chip selects on each bus. We use four chip selects in SW
         * to differentiate between the four chips.
         * We use ahb_buf_size for each chip and set SFA1AD, SFA2AD, SFB1AD,
         * SFB2AD accordingly.
         */
        qspi_writel(q, q->devtype_data->ahb_buf_size + addr_offset,
                    base + QUADSPI_SFA1AD);
        qspi_writel(q, q->devtype_data->ahb_buf_size * 2 + addr_offset,
                    base + QUADSPI_SFA2AD);
        qspi_writel(q, q->devtype_data->ahb_buf_size * 3 + addr_offset,
                    base + QUADSPI_SFB1AD);
        qspi_writel(q, q->devtype_data->ahb_buf_size * 4 + addr_offset,
                    base + QUADSPI_SFB2AD);

        q->selected = -1;

        /* Enable the module */
        qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
                    base + QUADSPI_MCR);

        /* clear all interrupt status */
        qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR);

        /* enable the interrupt */
        qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);

        return 0;
}

static const char *fsl_qspi_get_name(struct spi_mem *mem)
{
        struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
        struct device *dev = &mem->spi->dev;
        const char *name;

        /*
         * In order to keep mtdparts compatible with the old MTD driver at
         * mtd/spi-nor/fsl-quadspi.c, we set a custom name derived from the
         * platform_device of the controller.
         */
        if (of_get_available_child_count(q->dev->of_node) == 1)
                return dev_name(q->dev);

        name = devm_kasprintf(dev, GFP_KERNEL,
                              "%s-%d", dev_name(q->dev),
                              mem->spi->chip_select);

        if (!name) {
                dev_err(dev, "failed to get memory for custom flash name\n");
                return ERR_PTR(-ENOMEM);
        }

        return name;
}

static const struct spi_controller_mem_ops fsl_qspi_mem_ops = {
        .adjust_op_size = fsl_qspi_adjust_op_size,
        .supports_op = fsl_qspi_supports_op,
        .exec_op = fsl_qspi_exec_op,
        .get_name = fsl_qspi_get_name,
};

static int fsl_qspi_probe(struct platform_device *pdev)
{
        struct spi_controller *ctlr;
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        struct resource *res;
        struct fsl_qspi *q;
        int ret;

        ctlr = spi_alloc_master(&pdev->dev, sizeof(*q));
        if (!ctlr)
                return -ENOMEM;

        ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
                          SPI_TX_DUAL | SPI_TX_QUAD;

        q = spi_controller_get_devdata(ctlr);
        q->dev = dev;
        q->devtype_data = of_device_get_match_data(dev);
        if (!q->devtype_data) {
                ret = -ENODEV;
                goto err_put_ctrl;
        }

        platform_set_drvdata(pdev, q);

        /* find the resources */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
        q->iobase = devm_ioremap_resource(dev, res);
        if (IS_ERR(q->iobase)) {
                ret = PTR_ERR(q->iobase);
                goto err_put_ctrl;
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                           "QuadSPI-memory");
        q->ahb_addr = devm_ioremap_resource(dev, res);
        if (IS_ERR(q->ahb_addr)) {
                ret = PTR_ERR(q->ahb_addr);
                goto err_put_ctrl;
        }

        q->memmap_phy = res->start;

        /* find the clocks */
        q->clk_en = devm_clk_get(dev, "qspi_en");
        if (IS_ERR(q->clk_en)) {
                ret = PTR_ERR(q->clk_en);
                goto err_put_ctrl;
        }

        q->clk = devm_clk_get(dev, "qspi");
        if (IS_ERR(q->clk)) {
                ret = PTR_ERR(q->clk);
                goto err_put_ctrl;
        }

        ret = fsl_qspi_clk_prep_enable(q);
        if (ret) {
                dev_err(dev, "can not enable the clock\n");
                goto err_put_ctrl;
        }

        /* find the irq */
        ret = platform_get_irq(pdev, 0);
        if (ret < 0)
                goto err_disable_clk;

        ret = devm_request_irq(dev, ret,
                               fsl_qspi_irq_handler, 0, pdev->name, q);
        if (ret) {
                dev_err(dev, "failed to request irq: %d\n", ret);
                goto err_disable_clk;
        }

        mutex_init(&q->lock);

        ctlr->bus_num = -1;
        ctlr->num_chipselect = 4;
        ctlr->mem_ops = &fsl_qspi_mem_ops;

        fsl_qspi_default_setup(q);

        ctlr->dev.of_node = np;

        ret = devm_spi_register_controller(dev, ctlr);
        if (ret)
                goto err_destroy_mutex;

        return 0;

err_destroy_mutex:
        mutex_destroy(&q->lock);

err_disable_clk:
        fsl_qspi_clk_disable_unprep(q);

err_put_ctrl:
        spi_controller_put(ctlr);

        dev_err(dev, "Freescale QuadSPI probe failed\n");
        return ret;
}

static int fsl_qspi_remove(struct platform_device *pdev)
{
        struct fsl_qspi *q = platform_get_drvdata(pdev);

        /* disable the hardware */
        qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
        qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);

        fsl_qspi_clk_disable_unprep(q);

        mutex_destroy(&q->lock);

        return 0;
}

static int fsl_qspi_suspend(struct device *dev)
{
        return 0;
}

static int fsl_qspi_resume(struct device *dev)
{
        struct fsl_qspi *q = dev_get_drvdata(dev);

        fsl_qspi_default_setup(q);

        return 0;
}

static const struct of_device_id fsl_qspi_dt_ids[] = {
        { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, },
        { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, },
        { .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
        { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
        { .compatible = "fsl,ls1021a-qspi", .data = &ls1021a_data, },
        { .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);

static const struct dev_pm_ops fsl_qspi_pm_ops = {
        .suspend = fsl_qspi_suspend,
        .resume = fsl_qspi_resume,
};

static struct platform_driver fsl_qspi_driver = {
        .driver = {
                .name = "fsl-quadspi",
                .of_match_table = fsl_qspi_dt_ids,
                .pm = &fsl_qspi_pm_ops,
        },
        .probe = fsl_qspi_probe,
        .remove = fsl_qspi_remove,
};
module_platform_driver(fsl_qspi_driver);

MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver");
MODULE_AUTHOR("Freescale Semiconductor Inc.");
MODULE_AUTHOR("Boris Brezillon <bbrezillon@kernel.org>");
MODULE_AUTHOR("Frieder Schrempf <frieder.schrempf@kontron.de>");
MODULE_AUTHOR("Yogesh Gaur <yogeshnarayan.gaur@nxp.com>");
MODULE_AUTHOR("Suresh Gupta <suresh.gupta@nxp.com>");
MODULE_LICENSE("GPL v2");