// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
 * Author: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
 */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/spi/spi-mem.h>

#define QSPI_CR			0x00
#define CR_EN			BIT(0)
#define CR_ABORT		BIT(1)
#define CR_DMAEN		BIT(2)
#define CR_TCEN			BIT(3)
#define CR_SSHIFT		BIT(4)
#define CR_DFM			BIT(6)
#define CR_FSEL			BIT(7)
#define CR_FTHRES_SHIFT		8
#define CR_TEIE			BIT(16)
#define CR_TCIE			BIT(17)
#define CR_FTIE			BIT(18)
#define CR_SMIE			BIT(19)
#define CR_TOIE			BIT(20)
#define CR_APMS			BIT(22)
#define CR_PRESC_MASK		GENMASK(31, 24)

#define QSPI_DCR		0x04
#define DCR_FSIZE_MASK		GENMASK(20, 16)

#define QSPI_SR			0x08
#define SR_TEF			BIT(0)
#define SR_TCF			BIT(1)
#define SR_FTF			BIT(2)
#define SR_SMF			BIT(3)
#define SR_TOF			BIT(4)
#define SR_BUSY			BIT(5)
#define SR_FLEVEL_MASK		GENMASK(13, 8)

#define QSPI_FCR		0x0c
#define FCR_CTEF		BIT(0)
#define FCR_CTCF		BIT(1)
#define FCR_CSMF		BIT(3)

#define QSPI_DLR		0x10

#define QSPI_CCR		0x14
#define CCR_INST_MASK		GENMASK(7, 0)
#define CCR_IMODE_MASK		GENMASK(9, 8)
#define CCR_ADMODE_MASK		GENMASK(11, 10)
#define CCR_ADSIZE_MASK		GENMASK(13, 12)
#define CCR_DCYC_MASK		GENMASK(22, 18)
#define CCR_DMODE_MASK		GENMASK(25, 24)
#define CCR_FMODE_MASK		GENMASK(27, 26)
#define CCR_FMODE_INDW		(0U << 26)
#define CCR_FMODE_INDR		(1U << 26)
#define CCR_FMODE_APM		(2U << 26)
#define CCR_FMODE_MM		(3U << 26)
#define CCR_BUSWIDTH_0		0x0
#define CCR_BUSWIDTH_1		0x1
#define CCR_BUSWIDTH_2		0x2
#define CCR_BUSWIDTH_4		0x3

#define QSPI_AR			0x18
#define QSPI_ABR		0x1c
#define QSPI_DR			0x20
#define QSPI_PSMKR		0x24
#define QSPI_PSMAR		0x28
#define QSPI_PIR		0x2c
#define QSPI_LPTR		0x30

#define STM32_QSPI_MAX_MMAP_SZ	SZ_256M
#define STM32_QSPI_MAX_NORCHIP	2

#define STM32_FIFO_TIMEOUT_US	30000
#define STM32_BUSY_TIMEOUT_US	100000
#define STM32_ABT_TIMEOUT_US	100000
#define STM32_COMP_TIMEOUT_MS	1000
#define STM32_AUTOSUSPEND_DELAY	-1

struct stm32_qspi_flash {
	u32 cs;
	u32 presc;
};

struct stm32_qspi {
	struct device *dev;
	struct spi_controller *ctrl;
	phys_addr_t phys_base;
	void __iomem *io_base;
	void __iomem *mm_base;
	resource_size_t mm_size;
	struct clk *clk;
	u32 clk_rate;
	struct stm32_qspi_flash flash[STM32_QSPI_MAX_NORCHIP];
	struct completion data_completion;
	struct completion match_completion;
	u32 fmode;

	struct dma_chan *dma_chtx;
	struct dma_chan *dma_chrx;
	struct completion dma_completion;

	u32 cr_reg;
	u32 dcr_reg;
	unsigned long status_timeout;

	/*
	 * Protects the device configuration, which may differ between
	 * two flash accesses (bk1, bk2).
	 */
	struct mutex lock;
};
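/*
 * Interrupt handler for both completion paths: a status-match event
 * (SR_SMF, raised in automatic-polling mode) completes match_completion,
 * while transfer-complete/transfer-error events (SR_TCF/SR_TEF) complete
 * data_completion. In each case the corresponding interrupt enable bit
 * is cleared first, so the handler does not fire again before the waiter
 * has consumed the event.
 */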
static irqreturn_t stm32_qspi_irq(int irq, void *dev_id)
{
	struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id;
	u32 cr, sr;

	cr = readl_relaxed(qspi->io_base + QSPI_CR);
	sr = readl_relaxed(qspi->io_base + QSPI_SR);

	if (cr & CR_SMIE && sr & SR_SMF) {
		/* disable irq */
		cr &= ~CR_SMIE;
		writel_relaxed(cr, qspi->io_base + QSPI_CR);
		complete(&qspi->match_completion);

		return IRQ_HANDLED;
	}

	if (sr & (SR_TEF | SR_TCF)) {
		/* disable irq */
		cr &= ~CR_TCIE & ~CR_TEIE;
		writel_relaxed(cr, qspi->io_base + QSPI_CR);
		complete(&qspi->data_completion);
	}

	return IRQ_HANDLED;
}

static void stm32_qspi_read_fifo(u8 *val, void __iomem *addr)
{
	*val = readb_relaxed(addr);
}

static void stm32_qspi_write_fifo(u8 *val, void __iomem *addr)
{
	writeb_relaxed(*val, addr);
}

static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
			      const struct spi_mem_op *op)
{
	void (*tx_fifo)(u8 *val, void __iomem *addr);
	u32 len = op->data.nbytes, sr;
	u8 *buf;
	int ret;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		tx_fifo = stm32_qspi_read_fifo;
		buf = op->data.buf.in;
	} else {
		tx_fifo = stm32_qspi_write_fifo;
		buf = (u8 *)op->data.buf.out;
	}

	while (len--) {
		ret = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR,
							sr, (sr & SR_FTF), 1,
							STM32_FIFO_TIMEOUT_US);
		if (ret) {
			dev_err(qspi->dev, "fifo timeout (len:%d stat:%#x)\n",
				len, sr);
			return ret;
		}
		tx_fifo(buf++, qspi->io_base + QSPI_DR);
	}

	return 0;
}

static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
			    const struct spi_mem_op *op)
{
	memcpy_fromio(op->data.buf.in, qspi->mm_base + op->addr.val,
		      op->data.nbytes);
	return 0;
}

static void stm32_qspi_dma_callback(void *arg)
{
	struct completion *dma_completion = arg;

	complete(dma_completion);
}

static int stm32_qspi_tx_dma(struct stm32_qspi *qspi,
			     const struct spi_mem_op *op)
{
	struct dma_async_tx_descriptor *desc;
	enum dma_transfer_direction dma_dir;
	struct dma_chan *dma_ch;
	struct sg_table sgt;
	dma_cookie_t cookie;
	u32 cr, t_out;
	int err;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		dma_dir = DMA_DEV_TO_MEM;
		dma_ch = qspi->dma_chrx;
	} else {
		dma_dir = DMA_MEM_TO_DEV;
		dma_ch = qspi->dma_chtx;
	}

	/*
	 * spi_map_buf() returns -EINVAL if the buffer is not DMA-able
	 * (DMA-able: in vmalloc | kmap | virt_addr_valid).
	 */
	err = spi_controller_dma_map_mem_op_data(qspi->ctrl, op, &sgt);
	if (err)
		return err;

	desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents,
				       dma_dir, DMA_PREP_INTERRUPT);
	if (!desc) {
		err = -ENOMEM;
		goto out_unmap;
	}

	cr = readl_relaxed(qspi->io_base + QSPI_CR);

	reinit_completion(&qspi->dma_completion);
	desc->callback = stm32_qspi_dma_callback;
	desc->callback_param = &qspi->dma_completion;
	cookie = dmaengine_submit(desc);
	err = dma_submit_error(cookie);
	if (err)
		goto out;

	dma_async_issue_pending(dma_ch);

	writel_relaxed(cr | CR_DMAEN, qspi->io_base + QSPI_CR);

	/* scale the timeout with the number of scatterlist entries */
	t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
	if (!wait_for_completion_timeout(&qspi->dma_completion,
					 msecs_to_jiffies(t_out)))
		err = -ETIMEDOUT;

	if (err)
		dmaengine_terminate_all(dma_ch);

out:
	writel_relaxed(cr & ~CR_DMAEN, qspi->io_base + QSPI_CR);
out_unmap:
	spi_controller_dma_unmap_mem_op_data(qspi->ctrl, op, &sgt);

	return err;
}
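/*
 * Transfer dispatch: memory-mapped reads are served by memcpy_fromio(),
 * transfers larger than 4 bytes use DMA when a channel is available for
 * the direction, and everything else (including a failed DMA attempt)
 * falls back to FIFO polling.
 */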
static int stm32_qspi_tx(struct stm32_qspi *qspi, const struct spi_mem_op *op)
{
	if (!op->data.nbytes)
		return 0;

	if (qspi->fmode == CCR_FMODE_MM)
		return stm32_qspi_tx_mm(qspi, op);
	else if (((op->data.dir == SPI_MEM_DATA_IN && qspi->dma_chrx) ||
		  (op->data.dir == SPI_MEM_DATA_OUT && qspi->dma_chtx)) &&
		 op->data.nbytes > 4)
		if (!stm32_qspi_tx_dma(qspi, op))
			return 0;

	return stm32_qspi_tx_poll(qspi, op);
}

static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi)
{
	u32 sr;

	return readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR, sr,
						 !(sr & SR_BUSY), 1,
						 STM32_BUSY_TIMEOUT_US);
}

static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi)
{
	u32 cr, sr;
	int err = 0;

	if ((readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF) ||
	    qspi->fmode == CCR_FMODE_APM)
		goto out;

	reinit_completion(&qspi->data_completion);
	cr = readl_relaxed(qspi->io_base + QSPI_CR);
	writel_relaxed(cr | CR_TCIE | CR_TEIE, qspi->io_base + QSPI_CR);

	if (!wait_for_completion_timeout(&qspi->data_completion,
					 msecs_to_jiffies(STM32_COMP_TIMEOUT_MS))) {
		err = -ETIMEDOUT;
	} else {
		sr = readl_relaxed(qspi->io_base + QSPI_SR);
		if (sr & SR_TEF)
			err = -EIO;
	}

out:
	/* clear flags */
	writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
	if (!err)
		err = stm32_qspi_wait_nobusy(qspi);

	return err;
}

static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi)
{
	u32 cr;

	reinit_completion(&qspi->match_completion);
	cr = readl_relaxed(qspi->io_base + QSPI_CR);
	writel_relaxed(cr | CR_SMIE, qspi->io_base + QSPI_CR);

	if (!wait_for_completion_timeout(&qspi->match_completion,
					 msecs_to_jiffies(qspi->status_timeout)))
		return -ETIMEDOUT;

	writel_relaxed(FCR_CSMF, qspi->io_base + QSPI_FCR);

	return 0;
}

static int stm32_qspi_get_mode(u8 buswidth)
{
	if (buswidth == 4)
		return CCR_BUSWIDTH_4;

	return buswidth;
}
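/*
 * Build and start one operation. CCR encodes the opcode and the bus
 * width of each phase; ADSIZE is the address length in bytes minus one,
 * and the dummy phase is expressed in clock cycles:
 * DCYC = dummy bytes * 8 / dummy bus width.
 */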
static int stm32_qspi_send(struct spi_device *spi, const struct spi_mem_op *op)
{
	struct stm32_qspi *qspi = spi_controller_get_devdata(spi->master);
	struct stm32_qspi_flash *flash = &qspi->flash[spi_get_chipselect(spi, 0)];
	u32 ccr, cr;
	int timeout, err = 0, err_poll_status = 0;

	dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
		op->dummy.buswidth, op->data.buswidth,
		op->addr.val, op->data.nbytes);

	cr = readl_relaxed(qspi->io_base + QSPI_CR);
	cr &= ~CR_PRESC_MASK & ~CR_FSEL;
	cr |= FIELD_PREP(CR_PRESC_MASK, flash->presc);
	cr |= FIELD_PREP(CR_FSEL, flash->cs);
	writel_relaxed(cr, qspi->io_base + QSPI_CR);

	if (op->data.nbytes)
		writel_relaxed(op->data.nbytes - 1,
			       qspi->io_base + QSPI_DLR);

	ccr = qspi->fmode;
	ccr |= FIELD_PREP(CCR_INST_MASK, op->cmd.opcode);
	ccr |= FIELD_PREP(CCR_IMODE_MASK,
			  stm32_qspi_get_mode(op->cmd.buswidth));

	if (op->addr.nbytes) {
		ccr |= FIELD_PREP(CCR_ADMODE_MASK,
				  stm32_qspi_get_mode(op->addr.buswidth));
		ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
	}

	if (op->dummy.nbytes)
		ccr |= FIELD_PREP(CCR_DCYC_MASK,
				  op->dummy.nbytes * 8 / op->dummy.buswidth);

	if (op->data.nbytes) {
		ccr |= FIELD_PREP(CCR_DMODE_MASK,
				  stm32_qspi_get_mode(op->data.buswidth));
	}

	writel_relaxed(ccr, qspi->io_base + QSPI_CCR);

	if (op->addr.nbytes && qspi->fmode != CCR_FMODE_MM)
		writel_relaxed(op->addr.val, qspi->io_base + QSPI_AR);

	if (qspi->fmode == CCR_FMODE_APM)
		err_poll_status = stm32_qspi_wait_poll_status(qspi);

	err = stm32_qspi_tx(qspi, op);

	/*
	 * Abort in:
	 * - the error case,
	 * - memory-mapped read: prefetching must be stopped if we read the
	 *   last byte of the device (device size - fifo size). Since the
	 *   device size is not known, prefetching is always stopped.
	 */
	if (err || err_poll_status || qspi->fmode == CCR_FMODE_MM)
		goto abort;

	/* wait for the end of the transfer in indirect mode */
	err = stm32_qspi_wait_cmd(qspi);
	if (err)
		goto abort;

	return 0;

abort:
	cr = readl_relaxed(qspi->io_base + QSPI_CR) | CR_ABORT;
	writel_relaxed(cr, qspi->io_base + QSPI_CR);

	/* wait for the abort bit to be cleared by hardware */
	timeout = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_CR,
						    cr, !(cr & CR_ABORT), 1,
						    STM32_ABT_TIMEOUT_US);

	writel_relaxed(FCR_CTCF | FCR_CSMF, qspi->io_base + QSPI_FCR);

	if (err || err_poll_status || timeout)
		dev_err(qspi->dev, "%s err:%d err_poll_status:%d abort timeout:%d\n",
			__func__, err, err_poll_status, timeout);

	return err;
}

static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op *op,
				  u16 mask, u16 match,
				  unsigned long initial_delay_us,
				  unsigned long polling_rate_us,
				  unsigned long timeout_ms)
{
	struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
	int ret;

	if (!spi_mem_supports_op(mem, op))
		return -EOPNOTSUPP;

	ret = pm_runtime_resume_and_get(qspi->dev);
	if (ret < 0)
		return ret;

	mutex_lock(&qspi->lock);

	writel_relaxed(mask, qspi->io_base + QSPI_PSMKR);
	writel_relaxed(match, qspi->io_base + QSPI_PSMAR);
	qspi->fmode = CCR_FMODE_APM;
	qspi->status_timeout = timeout_ms;

	ret = stm32_qspi_send(mem->spi, op);
	mutex_unlock(&qspi->lock);

	pm_runtime_mark_last_busy(qspi->dev);
	pm_runtime_put_autosuspend(qspi->dev);

	return ret;
}

static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
	int ret;

	ret = pm_runtime_resume_and_get(qspi->dev);
	if (ret < 0)
		return ret;

	mutex_lock(&qspi->lock);
	if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
		qspi->fmode = CCR_FMODE_INDR;
	else
		qspi->fmode = CCR_FMODE_INDW;

	ret = stm32_qspi_send(mem->spi, op);
	mutex_unlock(&qspi->lock);

	pm_runtime_mark_last_busy(qspi->dev);
	pm_runtime_put_autosuspend(qspi->dev);

	return ret;
}

static int stm32_qspi_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
	struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master);

	if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
		return -EOPNOTSUPP;

	/* should never happen, as mm_base == NULL is an error probe exit condition */
	if (!qspi->mm_base && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN)
		return -EOPNOTSUPP;

	if (!qspi->mm_size)
		return -EOPNOTSUPP;

	return 0;
}
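/*
 * Direct-mapping read: memory-mapped mode is used when the requested
 * window fits inside the mapped area and the operation has an address
 * phase; otherwise fall back to an indirect read.
 */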
static ssize_t stm32_qspi_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master);
	struct spi_mem_op op;
	u32 addr_max;
	int ret;

	ret = pm_runtime_resume_and_get(qspi->dev);
	if (ret < 0)
		return ret;

	mutex_lock(&qspi->lock);
	/*
	 * Make a local copy of desc op_tmpl and complete dirmap rdesc
	 * spi_mem_op template with offs, len and *buf in order to get
	 * all needed transfer information into struct spi_mem_op.
	 */
	memcpy(&op, &desc->info.op_tmpl, sizeof(struct spi_mem_op));
	dev_dbg(qspi->dev, "%s len = 0x%zx offs = 0x%llx buf = 0x%p\n", __func__, len, offs, buf);

	op.data.nbytes = len;
	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;

	addr_max = op.addr.val + op.data.nbytes + 1;
	if (addr_max < qspi->mm_size && op.addr.buswidth)
		qspi->fmode = CCR_FMODE_MM;
	else
		qspi->fmode = CCR_FMODE_INDR;

	ret = stm32_qspi_send(desc->mem->spi, &op);
	mutex_unlock(&qspi->lock);

	pm_runtime_mark_last_busy(qspi->dev);
	pm_runtime_put_autosuspend(qspi->dev);

	return ret ?: len;
}
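/*
 * Plain spi_message path for non-spi-mem peripherals. A GPIO chip
 * select is required so that CS can be held asserted across the whole
 * message while each transfer is issued as a separate operation.
 */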
static int stm32_qspi_transfer_one_message(struct spi_controller *ctrl,
					   struct spi_message *msg)
{
	struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
	struct spi_transfer *transfer;
	struct spi_device *spi = msg->spi;
	struct spi_mem_op op;
	int ret = 0;

	if (!spi_get_csgpiod(spi, 0))
		return -EOPNOTSUPP;

	ret = pm_runtime_resume_and_get(qspi->dev);
	if (ret < 0)
		return ret;

	mutex_lock(&qspi->lock);

	gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), true);

	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
		u8 dummy_bytes = 0;

		memset(&op, 0, sizeof(op));

		dev_dbg(qspi->dev, "tx_buf:%p tx_nbits:%d rx_buf:%p rx_nbits:%d len:%d dummy_data:%d\n",
			transfer->tx_buf, transfer->tx_nbits,
			transfer->rx_buf, transfer->rx_nbits,
			transfer->len, transfer->dummy_data);

		/*
		 * QSPI hardware supports dummy byte transfers.
		 * If the current transfer carries dummy bytes, merge it with
		 * the next transfer to account for the QSPI block constraint.
		 */
		if (transfer->dummy_data) {
			op.dummy.buswidth = transfer->tx_nbits;
			op.dummy.nbytes = transfer->len;
			dummy_bytes = transfer->len;

			/* if this happens, the message is not correctly built */
			if (list_is_last(&transfer->transfer_list, &msg->transfers)) {
				ret = -EINVAL;
				goto end_of_transfer;
			}

			transfer = list_next_entry(transfer, transfer_list);
		}

		op.data.nbytes = transfer->len;

		if (transfer->rx_buf) {
			qspi->fmode = CCR_FMODE_INDR;
			op.data.buswidth = transfer->rx_nbits;
			op.data.dir = SPI_MEM_DATA_IN;
			op.data.buf.in = transfer->rx_buf;
		} else {
			qspi->fmode = CCR_FMODE_INDW;
			op.data.buswidth = transfer->tx_nbits;
			op.data.dir = SPI_MEM_DATA_OUT;
			op.data.buf.out = transfer->tx_buf;
		}

		ret = stm32_qspi_send(spi, &op);
		if (ret)
			goto end_of_transfer;

		msg->actual_length += transfer->len + dummy_bytes;
	}

end_of_transfer:
	gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), false);

	mutex_unlock(&qspi->lock);

	msg->status = ret;
	spi_finalize_current_message(ctrl);

	pm_runtime_mark_last_busy(qspi->dev);
	pm_runtime_put_autosuspend(qspi->dev);

	return ret;
}

static int stm32_qspi_setup(struct spi_device *spi)
{
	struct spi_controller *ctrl = spi->master;
	struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
	struct stm32_qspi_flash *flash;
	u32 presc, mode;
	int ret;

	if (ctrl->busy)
		return -EBUSY;

	if (!spi->max_speed_hz)
		return -EINVAL;

	mode = spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL);
	if ((mode == SPI_TX_OCTAL || mode == SPI_RX_OCTAL) ||
	    ((mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) &&
	     gpiod_count(qspi->dev, "cs") == -ENOENT)) {
		dev_err(qspi->dev, "spi-rx-bus-width\\/spi-tx-bus-width\\/cs-gpios\n");
		dev_err(qspi->dev, "configuration not supported\n");

		return -EINVAL;
	}

	ret = pm_runtime_resume_and_get(qspi->dev);
	if (ret < 0)
		return ret;

	/* the prescaler divides the kernel clock by presc + 1 */
	presc = DIV_ROUND_UP(qspi->clk_rate, spi->max_speed_hz) - 1;

	flash = &qspi->flash[spi_get_chipselect(spi, 0)];
	flash->cs = spi_get_chipselect(spi, 0);
	flash->presc = presc;

	mutex_lock(&qspi->lock);
	qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;

	/*
	 * Dual flash mode is only enabled when SPI_TX_OCTAL and SPI_RX_OCTAL
	 * are both set in spi->mode and the "cs-gpios" property is found in DT.
	 */
	if (mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) {
		qspi->cr_reg |= CR_DFM;
		dev_dbg(qspi->dev, "Dual flash mode enabled\n");
	}

	writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);

	/* set dcr fsize to max address */
	qspi->dcr_reg = DCR_FSIZE_MASK;
	writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
	mutex_unlock(&qspi->lock);

	pm_runtime_mark_last_busy(qspi->dev);
	pm_runtime_put_autosuspend(qspi->dev);

	return 0;
}
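/*
 * DMA channels are optional: when a channel is missing or cannot be
 * configured, the driver silently falls back to FIFO polling. Only
 * -EPROBE_DEFER from dma_request_chan() is propagated to the caller.
 */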
static int stm32_qspi_dma_setup(struct stm32_qspi *qspi)
{
	struct dma_slave_config dma_cfg;
	struct device *dev = qspi->dev;
	int ret = 0;

	memset(&dma_cfg, 0, sizeof(dma_cfg));

	dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma_cfg.src_addr = qspi->phys_base + QSPI_DR;
	dma_cfg.dst_addr = qspi->phys_base + QSPI_DR;
	dma_cfg.src_maxburst = 4;
	dma_cfg.dst_maxburst = 4;

	qspi->dma_chrx = dma_request_chan(dev, "rx");
	if (IS_ERR(qspi->dma_chrx)) {
		ret = PTR_ERR(qspi->dma_chrx);
		qspi->dma_chrx = NULL;
		if (ret == -EPROBE_DEFER)
			goto out;
	} else {
		if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) {
			dev_err(dev, "dma rx config failed\n");
			dma_release_channel(qspi->dma_chrx);
			qspi->dma_chrx = NULL;
		}
	}

	qspi->dma_chtx = dma_request_chan(dev, "tx");
	if (IS_ERR(qspi->dma_chtx)) {
		ret = PTR_ERR(qspi->dma_chtx);
		qspi->dma_chtx = NULL;
	} else {
		if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) {
			dev_err(dev, "dma tx config failed\n");
			dma_release_channel(qspi->dma_chtx);
			qspi->dma_chtx = NULL;
		}
	}

out:
	init_completion(&qspi->dma_completion);

	if (ret != -EPROBE_DEFER)
		ret = 0;

	return ret;
}

static void stm32_qspi_dma_free(struct stm32_qspi *qspi)
{
	if (qspi->dma_chtx)
		dma_release_channel(qspi->dma_chtx);
	if (qspi->dma_chrx)
		dma_release_channel(qspi->dma_chrx);
}

/*
 * No special host constraint, so use the default spi_mem_default_supports_op()
 * to check the supported modes.
 */
static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
	.exec_op	= stm32_qspi_exec_op,
	.dirmap_create	= stm32_qspi_dirmap_create,
	.dirmap_read	= stm32_qspi_dirmap_read,
	.poll_status	= stm32_qspi_poll_status,
};
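/*
 * Expected devicetree wiring, sketched from the resource names used in
 * probe below. Addresses, interrupt and DMA specifiers are placeholders;
 * see the st,stm32-qspi binding for authoritative values:
 *
 *	qspi: spi@58003000 {
 *		compatible = "st,stm32f469-qspi";
 *		reg = <0x58003000 0x1000>, <0x70000000 0x10000000>;
 *		reg-names = "qspi", "qspi_mm";
 *		interrupts = <...>;
 *		clocks = <...>;
 *		resets = <...>;
 *		dmas = <...>, <...>;
 *		dma-names = "tx", "rx";
 *	};
 */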
static int stm32_qspi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_controller *ctrl;
	struct reset_control *rstc;
	struct stm32_qspi *qspi;
	struct resource *res;
	int ret, irq;

	ctrl = devm_spi_alloc_master(dev, sizeof(*qspi));
	if (!ctrl)
		return -ENOMEM;

	qspi = spi_controller_get_devdata(ctrl);
	qspi->ctrl = ctrl;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
	qspi->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(qspi->io_base))
		return PTR_ERR(qspi->io_base);

	qspi->phys_base = res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
	qspi->mm_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(qspi->mm_base))
		return PTR_ERR(qspi->mm_base);

	qspi->mm_size = resource_size(res);
	if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
			       dev_name(dev), qspi);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return ret;
	}

	init_completion(&qspi->data_completion);
	init_completion(&qspi->match_completion);

	qspi->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(qspi->clk))
		return PTR_ERR(qspi->clk);

	qspi->clk_rate = clk_get_rate(qspi->clk);
	if (!qspi->clk_rate)
		return -EINVAL;

	ret = clk_prepare_enable(qspi->clk);
	if (ret) {
		dev_err(dev, "can not enable the clock\n");
		return ret;
	}

	rstc = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rstc)) {
		ret = PTR_ERR(rstc);
		if (ret == -EPROBE_DEFER)
			goto err_clk_disable;
	} else {
		reset_control_assert(rstc);
		udelay(2);
		reset_control_deassert(rstc);
	}

	qspi->dev = dev;
	platform_set_drvdata(pdev, qspi);
	ret = stm32_qspi_dma_setup(qspi);
	if (ret)
		goto err_dma_free;

	mutex_init(&qspi->lock);

	ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_OCTAL
			  | SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_OCTAL;
	ctrl->setup = stm32_qspi_setup;
	ctrl->bus_num = -1;
	ctrl->mem_ops = &stm32_qspi_mem_ops;
	ctrl->use_gpio_descriptors = true;
	ctrl->transfer_one_message = stm32_qspi_transfer_one_message;
	ctrl->num_chipselect = STM32_QSPI_MAX_NORCHIP;
	ctrl->dev.of_node = dev->of_node;

	pm_runtime_set_autosuspend_delay(dev, STM32_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_get_noresume(dev);

	ret = spi_register_master(ctrl);
	if (ret)
		goto err_pm_runtime_free;

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;

err_pm_runtime_free:
	pm_runtime_get_sync(qspi->dev);
	/* disable qspi */
	writel_relaxed(0, qspi->io_base + QSPI_CR);
	mutex_destroy(&qspi->lock);
	pm_runtime_put_noidle(qspi->dev);
	pm_runtime_disable(qspi->dev);
	pm_runtime_set_suspended(qspi->dev);
	pm_runtime_dont_use_autosuspend(qspi->dev);
err_dma_free:
	stm32_qspi_dma_free(qspi);
err_clk_disable:
	clk_disable_unprepare(qspi->clk);

	return ret;
}

static void stm32_qspi_remove(struct platform_device *pdev)
{
	struct stm32_qspi *qspi = platform_get_drvdata(pdev);

	pm_runtime_get_sync(qspi->dev);
	spi_unregister_master(qspi->ctrl);
	/* disable qspi */
	writel_relaxed(0, qspi->io_base + QSPI_CR);
	stm32_qspi_dma_free(qspi);
	mutex_destroy(&qspi->lock);
	pm_runtime_put_noidle(qspi->dev);
	pm_runtime_disable(qspi->dev);
	pm_runtime_set_suspended(qspi->dev);
	pm_runtime_dont_use_autosuspend(qspi->dev);
	clk_disable_unprepare(qspi->clk);
}

static int __maybe_unused stm32_qspi_runtime_suspend(struct device *dev)
{
	struct stm32_qspi *qspi = dev_get_drvdata(dev);

	clk_disable_unprepare(qspi->clk);

	return 0;
}

static int __maybe_unused stm32_qspi_runtime_resume(struct device *dev)
{
	struct stm32_qspi *qspi = dev_get_drvdata(dev);

	return clk_prepare_enable(qspi->clk);
}

static int __maybe_unused stm32_qspi_suspend(struct device *dev)
{
	pinctrl_pm_select_sleep_state(dev);

	return pm_runtime_force_suspend(dev);
}

static int __maybe_unused stm32_qspi_resume(struct device *dev)
{
	struct stm32_qspi *qspi = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	pinctrl_pm_select_default_state(dev);

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
	writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}
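/*
 * Runtime PM only gates the kernel clock; system sleep additionally
 * switches pinctrl states and, on resume, rewrites the cached CR/DCR
 * values in case the controller lost its configuration while suspended.
 */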
static const struct dev_pm_ops stm32_qspi_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_qspi_runtime_suspend,
			   stm32_qspi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_qspi_suspend, stm32_qspi_resume)
};

static const struct of_device_id stm32_qspi_match[] = {
	{.compatible = "st,stm32f469-qspi"},
	{}
};
MODULE_DEVICE_TABLE(of, stm32_qspi_match);

static struct platform_driver stm32_qspi_driver = {
	.probe	= stm32_qspi_probe,
	.remove_new = stm32_qspi_remove,
	.driver	= {
		.name = "stm32-qspi",
		.of_match_table = stm32_qspi_match,
		.pm = &stm32_qspi_pm_ops,
	},
};
module_platform_driver(stm32_qspi_driver);

MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
MODULE_DESCRIPTION("STMicroelectronics STM32 quad spi driver");
MODULE_LICENSE("GPL v2");