// SPDX-License-Identifier: GPL-2.0-only
/*
 * DesignWare SPI core controller driver (refer to pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>
#include <linux/of.h>

#include "spi-dw.h"

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif

/* Slave spi_device related */
struct chip_data {
	u32 cr0;
	u32 rx_sample_dly;	/* RX sample delay */
};

#ifdef CONFIG_DEBUG_FS

#define DW_SPI_DBGFS_REG(_name, _off)	\
{					\
	.name = _name,			\
	.offset = _off,			\
}

static const struct debugfs_reg32 dw_spi_dbgfs_regs[] = {
	DW_SPI_DBGFS_REG("CTRLR0", DW_SPI_CTRLR0),
	DW_SPI_DBGFS_REG("CTRLR1", DW_SPI_CTRLR1),
	DW_SPI_DBGFS_REG("SSIENR", DW_SPI_SSIENR),
	DW_SPI_DBGFS_REG("SER", DW_SPI_SER),
	DW_SPI_DBGFS_REG("BAUDR", DW_SPI_BAUDR),
	DW_SPI_DBGFS_REG("TXFTLR", DW_SPI_TXFTLR),
	DW_SPI_DBGFS_REG("RXFTLR", DW_SPI_RXFTLR),
	DW_SPI_DBGFS_REG("TXFLR", DW_SPI_TXFLR),
	DW_SPI_DBGFS_REG("RXFLR", DW_SPI_RXFLR),
	DW_SPI_DBGFS_REG("SR", DW_SPI_SR),
	DW_SPI_DBGFS_REG("IMR", DW_SPI_IMR),
	DW_SPI_DBGFS_REG("ISR", DW_SPI_ISR),
	DW_SPI_DBGFS_REG("DMACR", DW_SPI_DMACR),
	DW_SPI_DBGFS_REG("DMATDLR", DW_SPI_DMATDLR),
	DW_SPI_DBGFS_REG("DMARDLR", DW_SPI_DMARDLR),
	DW_SPI_DBGFS_REG("RX_SAMPLE_DLY", DW_SPI_RX_SAMPLE_DLY),
};

static int dw_spi_debugfs_init(struct dw_spi *dws)
{
	char name[32];

	snprintf(name, 32, "dw_spi%d", dws->master->bus_num);
	dws->debugfs = debugfs_create_dir(name, NULL);
	if (!dws->debugfs)
		return -ENOMEM;

	dws->regset.regs = dw_spi_dbgfs_regs;
	dws->regset.nregs = ARRAY_SIZE(dw_spi_dbgfs_regs);
	dws->regset.base = dws->regs;
	debugfs_create_regset32("registers", 0400, dws->debugfs, &dws->regset);

	return 0;
}

static void dw_spi_debugfs_remove(struct dw_spi *dws)
{
	debugfs_remove_recursive(dws->debugfs);
}

#else
static inline int dw_spi_debugfs_init(struct dw_spi *dws)
{
	return 0;
}

static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */

void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
	bool cs_high = !!(spi->mode & SPI_CS_HIGH);

	/*
	 * The DW SPI controller demands a native CS to be set in order to
	 * proceed with the data transfer. So in order to activate the SPI
	 * communications we must set a corresponding bit in the Slave
	 * Enable register no matter whether the SPI core is configured to
	 * support active-high or active-low CS level.
	 */
	if (cs_high == enable)
		dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
	else
		dw_writel(dws, DW_SPI_SER, 0);
}
EXPORT_SYMBOL_GPL(dw_spi_set_cs);

/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
	u32 tx_room, rxtx_gap;

	tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);

	/*
	 * Another concern is the Tx/Rx mismatch: we considered using
	 * (dws->fifo_len - rxflr - txflr) as the maximum Tx count, but it
	 * doesn't cover the data travelling through the shift registers,
	 * outside both FIFOs. So the limit is tracked from the software
	 * point of view instead.
	 */
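	/*
	 * Hypothetical worked example (numbers are illustrative only): with
	 * fifo_len = 16, tx_len = 20 and rx_len = 28, eight frames have been
	 * pushed out but not yet read back (28 - 20), so at most
	 * rxtx_gap = 16 - 8 = 8 more entries can be queued without risking
	 * an Rx FIFO overflow.
	 */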
	rxtx_gap = dws->fifo_len - (dws->rx_len - dws->tx_len);

	return min3((u32)dws->tx_len, tx_room, rxtx_gap);
}

/* Return the max entries we should read out of rx fifo */
static inline u32 rx_max(struct dw_spi *dws)
{
	return min_t(u32, dws->rx_len, dw_readl(dws, DW_SPI_RXFLR));
}

static void dw_writer(struct dw_spi *dws)
{
	u32 max = tx_max(dws);
	u32 txw = 0;

	while (max--) {
		if (dws->tx) {
			if (dws->n_bytes == 1)
				txw = *(u8 *)(dws->tx);
			else if (dws->n_bytes == 2)
				txw = *(u16 *)(dws->tx);
			else
				txw = *(u32 *)(dws->tx);

			dws->tx += dws->n_bytes;
		}
		dw_write_io_reg(dws, DW_SPI_DR, txw);
		--dws->tx_len;
	}
}

static void dw_reader(struct dw_spi *dws)
{
	u32 max = rx_max(dws);
	u32 rxw;

	while (max--) {
		rxw = dw_read_io_reg(dws, DW_SPI_DR);
		if (dws->rx) {
			if (dws->n_bytes == 1)
				*(u8 *)(dws->rx) = rxw;
			else if (dws->n_bytes == 2)
				*(u16 *)(dws->rx) = rxw;
			else
				*(u32 *)(dws->rx) = rxw;

			dws->rx += dws->n_bytes;
		}
		--dws->rx_len;
	}
}

int dw_spi_check_status(struct dw_spi *dws, bool raw)
{
	u32 irq_status;
	int ret = 0;

	if (raw)
		irq_status = dw_readl(dws, DW_SPI_RISR);
	else
		irq_status = dw_readl(dws, DW_SPI_ISR);

	if (irq_status & SPI_INT_RXOI) {
		dev_err(&dws->master->dev, "RX FIFO overflow detected\n");
		ret = -EIO;
	}

	if (irq_status & SPI_INT_RXUI) {
		dev_err(&dws->master->dev, "RX FIFO underflow detected\n");
		ret = -EIO;
	}

	if (irq_status & SPI_INT_TXOI) {
		dev_err(&dws->master->dev, "TX FIFO overflow detected\n");
		ret = -EIO;
	}

	/* Generically handle the erroneous situation */
	if (ret) {
		spi_reset_chip(dws);
		if (dws->master->cur_msg)
			dws->master->cur_msg->status = ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_check_status);

static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (dw_spi_check_status(dws, false)) {
		spi_finalize_current_transfer(dws->master);
		return IRQ_HANDLED;
	}

	/*
	 * Read data from the Rx FIFO every time we've got a chance executing
	 * this method. If there is nothing left to receive, terminate the
	 * procedure. Otherwise adjust the Rx FIFO Threshold level if it's the
	 * final stage of the transfer. By doing so we'll get the next IRQ
	 * right when the leftover incoming data is received.
	 */
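	/*
	 * Illustrative example (values are hypothetical): if RXFTLR is 7 and
	 * only 3 frames are left to receive, lowering RXFTLR to 2 makes the
	 * next Rx FIFO Full IRQ fire exactly when those last 3 frames have
	 * arrived, instead of waiting for a threshold that will never be
	 * reached.
	 */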
	dw_reader(dws);
	if (!dws->rx_len) {
		spi_mask_intr(dws, 0xff);
		spi_finalize_current_transfer(dws->master);
	} else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
		dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
	}

	/*
	 * Send data out if the Tx FIFO Empty IRQ is received. The IRQ will be
	 * disabled after the data transmission is finished so as not to
	 * have the TXE IRQ flood at the final stage of the transfer.
	 */
	if (irq_status & SPI_INT_TXEI) {
		dw_writer(dws);
		if (!dws->tx_len)
			spi_mask_intr(dws, SPI_INT_TXEI);
	}

	return IRQ_HANDLED;
}

static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
	struct spi_controller *master = dev_id;
	struct dw_spi *dws = spi_controller_get_devdata(master);
	u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f;

	if (!irq_status)
		return IRQ_NONE;

	if (!master->cur_msg) {
		spi_mask_intr(dws, 0xff);
		return IRQ_HANDLED;
	}

	return dws->transfer_handler(dws);
}

static u32 dw_spi_prepare_cr0(struct dw_spi *dws, struct spi_device *spi)
{
	u32 cr0 = 0;

	if (!(dws->caps & DW_SPI_CAP_DWC_SSI)) {
		/* CTRLR0[ 5: 4] Frame Format */
		cr0 |= SSI_MOTO_SPI << SPI_FRF_OFFSET;

		/*
		 * SPI mode (SCPOL|SCPH)
		 * CTRLR0[ 6] Serial Clock Phase
		 * CTRLR0[ 7] Serial Clock Polarity
		 */
		cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET;
		cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET;

		/* CTRLR0[11] Shift Register Loop */
		cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << SPI_SRL_OFFSET;
	} else {
		/* CTRLR0[ 7: 6] Frame Format */
		cr0 |= SSI_MOTO_SPI << DWC_SSI_CTRLR0_FRF_OFFSET;

		/*
		 * SPI mode (SCPOL|SCPH)
		 * CTRLR0[ 8] Serial Clock Phase
		 * CTRLR0[ 9] Serial Clock Polarity
		 */
		cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << DWC_SSI_CTRLR0_SCPOL_OFFSET;
		cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << DWC_SSI_CTRLR0_SCPH_OFFSET;

		/* CTRLR0[13] Shift Register Loop */
		cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << DWC_SSI_CTRLR0_SRL_OFFSET;

		if (dws->caps & DW_SPI_CAP_KEEMBAY_MST)
			cr0 |= DWC_SSI_CTRLR0_KEEMBAY_MST;
	}

	return cr0;
}

void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
			  struct dw_spi_cfg *cfg)
{
	struct chip_data *chip = spi_get_ctldata(spi);
	u32 cr0 = chip->cr0;
	u32 speed_hz;
	u16 clk_div;

	/* CTRLR0[ 4/3: 0] or CTRLR0[20:16] Data Frame Size */
	cr0 |= (cfg->dfs - 1) << dws->dfs_offset;

	if (!(dws->caps & DW_SPI_CAP_DWC_SSI))
		/* CTRLR0[ 9:8] Transfer Mode */
		cr0 |= cfg->tmode << SPI_TMOD_OFFSET;
	else
		/* CTRLR0[11:10] Transfer Mode */
		cr0 |= cfg->tmode << DWC_SSI_CTRLR0_TMOD_OFFSET;

	dw_writel(dws, DW_SPI_CTRLR0, cr0);

	if (cfg->tmode == SPI_TMOD_EPROMREAD || cfg->tmode == SPI_TMOD_RO)
		dw_writel(dws, DW_SPI_CTRLR1, cfg->ndf ? cfg->ndf - 1 : 0);

	/*
	 * Note the DW APB SSI clock divider doesn't support odd numbers, so
	 * the requested rate is rounded down to the nearest reachable even
	 * divider. For instance (illustrative values), with max_freq =
	 * 100 MHz and a requested 30 MHz the divider becomes 4, giving an
	 * effective 25 MHz: never faster than requested.
	 */
	clk_div = (DIV_ROUND_UP(dws->max_freq, cfg->freq) + 1) & 0xfffe;
	speed_hz = dws->max_freq / clk_div;

	if (dws->current_freq != speed_hz) {
		spi_set_clk(dws, clk_div);
		dws->current_freq = speed_hz;
	}

	/* Update RX sample delay if required */
	if (dws->cur_rx_sample_dly != chip->rx_sample_dly) {
		dw_writel(dws, DW_SPI_RX_SAMPLE_DLY, chip->rx_sample_dly);
		dws->cur_rx_sample_dly = chip->rx_sample_dly;
	}
}
EXPORT_SYMBOL_GPL(dw_spi_update_config);

static void dw_spi_irq_setup(struct dw_spi *dws)
{
	u16 level;
	u8 imask;

	/*
	 * Originally the Tx and Rx data lengths match. The Rx FIFO Threshold
	 * level will be adjusted at the final stage of the IRQ-based SPI
	 * transfer execution so as not to lose the leftover of the incoming
	 * data.
	 */
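	/*
	 * Illustrative example (hypothetical numbers): with fifo_len = 16 and
	 * a long transfer, level = 8, so the TXE IRQ fires once the Tx FIFO
	 * drains to 8 entries or fewer, while RXFTLR = 7 raises the Rx FIFO
	 * Full IRQ once 8 entries have been received.
	 */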
	level = min_t(u16, dws->fifo_len / 2, dws->tx_len);
	dw_writel(dws, DW_SPI_TXFTLR, level);
	dw_writel(dws, DW_SPI_RXFTLR, level - 1);

	dws->transfer_handler = dw_spi_transfer_handler;

	imask = SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI |
		SPI_INT_RXFI;
	spi_umask_intr(dws, imask);
}

/*
 * The iterative procedure of the poll-based transfer is simple: write as much
 * as possible to the Tx FIFO, wait until the data pending to be received is
 * ready to be read, read it from the Rx FIFO and check whether the performed
 * procedure has been successful.
 *
 * Note this method, same as the IRQ-based transfer, won't work well for SPI
 * devices connected to the controller with a native CS due to the automatic
 * CS assertion/de-assertion.
 */
static int dw_spi_poll_transfer(struct dw_spi *dws,
				struct spi_transfer *transfer)
{
	struct spi_delay delay;
	u16 nbits;
	int ret;

	delay.unit = SPI_DELAY_UNIT_SCK;
	nbits = dws->n_bytes * BITS_PER_BYTE;

	do {
		dw_writer(dws);

		/* Wait for the number of SCK cycles the in-flight frames take */
		delay.value = nbits * (dws->rx_len - dws->tx_len);
		spi_delay_exec(&delay, transfer);

		dw_reader(dws);

		ret = dw_spi_check_status(dws, true);
		if (ret)
			return ret;
	} while (dws->rx_len);

	return 0;
}

static int dw_spi_transfer_one(struct spi_controller *master,
			       struct spi_device *spi,
			       struct spi_transfer *transfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);
	struct dw_spi_cfg cfg = {
		.tmode = SPI_TMOD_TR,
		.dfs = transfer->bits_per_word,
		.freq = transfer->speed_hz,
	};
	int ret;

	dws->dma_mapped = 0;
	dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
	dws->tx = (void *)transfer->tx_buf;
	dws->tx_len = transfer->len / dws->n_bytes;
	dws->rx = transfer->rx_buf;
	dws->rx_len = dws->tx_len;

	/* Ensure the data above is visible to all CPUs */
	smp_mb();

	spi_enable_chip(dws, 0);

	dw_spi_update_config(dws, spi, &cfg);

	transfer->effective_speed_hz = dws->current_freq;

	/* Check if the current transfer is a DMA transaction */
	if (master->can_dma && master->can_dma(master, spi, transfer))
		dws->dma_mapped = master->cur_msg_mapped;

	/* For poll mode just disable all interrupts */
	spi_mask_intr(dws, 0xff);

	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_setup(dws, transfer);
		if (ret)
			return ret;
	}

	spi_enable_chip(dws, 1);

	if (dws->dma_mapped)
		return dws->dma_ops->dma_transfer(dws, transfer);
	else if (dws->irq == IRQ_NOTCONNECTED)
		return dw_spi_poll_transfer(dws, transfer);

	dw_spi_irq_setup(dws);

	return 1;
}

static void dw_spi_handle_err(struct spi_controller *master,
			      struct spi_message *msg)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	if (dws->dma_mapped)
		dws->dma_ops->dma_stop(dws);

	spi_reset_chip(dws);
}

static int dw_spi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	/* The 16-bit CTRLR1.NDF counter limits a single EEPROM-read */
	if (op->data.dir == SPI_MEM_DATA_IN)
		op->data.nbytes = clamp_val(op->data.nbytes, 0, SPI_NDF_MASK + 1);

	return 0;
}

static bool dw_spi_supports_mem_op(struct spi_mem *mem,
				   const struct spi_mem_op *op)
{
	if (op->data.buswidth > 1 || op->addr.buswidth > 1 ||
	    op->dummy.buswidth > 1 || op->cmd.buswidth > 1)
		return false;

	return spi_mem_default_supports_op(mem, op);
}

static int dw_spi_init_mem_buf(struct dw_spi *dws, const struct spi_mem_op *op)
{
	unsigned int i, j, len;
	u8 *out;

	/*
	 * Calculate the total length of the EEPROM command transfer and
	 * either use the pre-allocated buffer or create a temporary one.
	 */
	len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		len += op->data.nbytes;

	if (len <= SPI_BUF_SIZE) {
		out = dws->buf;
	} else {
		out = kzalloc(len, GFP_KERNEL);
		if (!out)
			return -ENOMEM;
	}

	/*
	 * Collect the operation code, address and dummy bytes into a single
	 * buffer. If it's a transfer with data to be sent, also copy it into
	 * that buffer in order to speed the data transmission up.
	 */
	for (i = 0; i < op->cmd.nbytes; ++i)
		out[i] = SPI_GET_BYTE(op->cmd.opcode, op->cmd.nbytes - i - 1);
	for (j = 0; j < op->addr.nbytes; ++i, ++j)
		out[i] = SPI_GET_BYTE(op->addr.val, op->addr.nbytes - j - 1);
	for (j = 0; j < op->dummy.nbytes; ++i, ++j)
		out[i] = 0x0;

	if (op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(&out[i], op->data.buf.out, op->data.nbytes);

	dws->n_bytes = 1;
	dws->tx = out;
	dws->tx_len = len;
	if (op->data.dir == SPI_MEM_DATA_IN) {
		dws->rx = op->data.buf.in;
		dws->rx_len = op->data.nbytes;
	} else {
		dws->rx = NULL;
		dws->rx_len = 0;
	}

	return 0;
}

static void dw_spi_free_mem_buf(struct dw_spi *dws)
{
	if (dws->tx != dws->buf)
		kfree(dws->tx);
}

static int dw_spi_write_then_read(struct dw_spi *dws, struct spi_device *spi)
{
	u32 room, entries, sts;
	unsigned int len;
	u8 *buf;

	/*
	 * At the initial stage we just pre-fill the Tx FIFO with no rush,
	 * since the native CS hasn't been enabled yet and the automatic data
	 * transmission won't start until we do that.
	 */
	len = min(dws->fifo_len, dws->tx_len);
	buf = dws->tx;
	while (len--)
		dw_write_io_reg(dws, DW_SPI_DR, *buf++);

	/*
	 * After setting any bit in the SER register the transmission will
	 * start automatically. We have to keep up with that procedure,
	 * otherwise the CS de-assertion will happen, whereupon the memory
	 * operation will be pre-terminated.
	 */
	len = dws->tx_len - ((void *)buf - dws->tx);
	dw_spi_set_cs(spi, false);
	while (len) {
		entries = readl_relaxed(dws->regs + DW_SPI_TXFLR);
		if (!entries) {
			dev_err(&dws->master->dev, "CS de-assertion on Tx\n");
			return -EIO;
		}
		room = min(dws->fifo_len - entries, len);
		for (; room; --room, --len)
			dw_write_io_reg(dws, DW_SPI_DR, *buf++);
	}

	/*
	 * Data fetching will start automatically if the EEPROM-read mode is
	 * activated. We have to keep up with the incoming data pace to
	 * prevent an Rx FIFO overflow causing the inbound data loss.
	 */
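	/*
	 * For scale (hypothetical numbers): at a 25 MHz bus clock with 8-bit
	 * frames a new byte lands in the Rx FIFO roughly every 320 ns, so
	 * the polling loop below has to drain entries at least that fast on
	 * average to avoid an overflow.
	 */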
	len = dws->rx_len;
	buf = dws->rx;
	while (len) {
		entries = readl_relaxed(dws->regs + DW_SPI_RXFLR);
		if (!entries) {
			sts = readl_relaxed(dws->regs + DW_SPI_RISR);
			if (sts & SPI_INT_RXOI) {
				dev_err(&dws->master->dev, "FIFO overflow on Rx\n");
				return -EIO;
			}
			continue;
		}
		entries = min(entries, len);
		for (; entries; --entries, --len)
			*buf++ = dw_read_io_reg(dws, DW_SPI_DR);
	}

	return 0;
}

static inline bool dw_spi_ctlr_busy(struct dw_spi *dws)
{
	return dw_readl(dws, DW_SPI_SR) & SR_BUSY;
}

static int dw_spi_wait_mem_op_done(struct dw_spi *dws)
{
	int retry = SPI_WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	/* Estimate how long the leftover Tx FIFO entries take to shift out */
	nents = dw_readl(dws, DW_SPI_TXFLR);
	ns = NSEC_PER_SEC / dws->current_freq * nents;
	ns *= dws->n_bytes * BITS_PER_BYTE;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_ctlr_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Mem op hung up\n");
		return -EIO;
	}

	return 0;
}

static void dw_spi_stop_mem_op(struct dw_spi *dws, struct spi_device *spi)
{
	spi_enable_chip(dws, 0);
	dw_spi_set_cs(spi, true);
	spi_enable_chip(dws, 1);
}

/*
 * The SPI memory operation implementation below is the best choice for
 * devices which are selected by the native chip-select lane. It's
 * specifically developed to work around the problem with the automatic
 * chip-select lane toggle when there is no data in the Tx FIFO buffer.
 * Luckily the current SPI-mem core calls the exec_op() callback only if the
 * GPIO-based CS is unavailable.
 */
static int dw_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct dw_spi *dws = spi_controller_get_devdata(mem->spi->controller);
	struct dw_spi_cfg cfg;
	unsigned long flags;
	int ret;

	/*
	 * Collect the outbound data into a single buffer to speed the
	 * transmission up at least on the initial stage.
	 */
	ret = dw_spi_init_mem_buf(dws, op);
	if (ret)
		return ret;

	/*
	 * DW SPI EEPROM-read mode is required only for the SPI memory Data-IN
	 * operation. Transmit-only mode is suitable for the rest of them.
	 */
	cfg.dfs = 8;
	cfg.freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_mem_freq);
	if (op->data.dir == SPI_MEM_DATA_IN) {
		cfg.tmode = SPI_TMOD_EPROMREAD;
		cfg.ndf = op->data.nbytes;
	} else {
		cfg.tmode = SPI_TMOD_TO;
	}

	spi_enable_chip(dws, 0);

	dw_spi_update_config(dws, mem->spi, &cfg);

	spi_mask_intr(dws, 0xff);

	spi_enable_chip(dws, 1);

	/*
	 * The DW APB SSI controller has very nasty peculiarities. First,
	 * originally (without any vendor-specific modifications) it doesn't
	 * provide a direct way to set and clear the native chip-select
	 * signal. Instead the controller asserts the CS lane if the Tx FIFO
	 * isn't empty and a transmission is going on, and automatically
	 * de-asserts it back to the high level if the Tx FIFO doesn't have
	 * anything to be pushed out. Due to that, multi-tasking or heavy IRQ
	 * activity might be fatal, since preemption of the transfer procedure
	 * may cause the Tx FIFO to get empty and the CS to be suddenly
	 * de-asserted, which in the middle of the transfer will most likely
	 * cause data loss. Secondly, the EEPROM-read and Read-only DW SPI
	 * transfer modes imply the incoming data being automatically pulled
	 * into the Rx FIFO. So if the driver software is late in fetching the
	 * data from the FIFO before it overflows, the new incoming data will
	 * be lost. In order to make sure the executed memory operations are
	 * CS-atomic and to prevent the Rx FIFO overflow we have to disable
	 * the local interrupts so as to block any preemption during the
	 * subsequent IO operations.
	 *
	 * Note. In some circumstances disabling IRQs may not help to prevent
	 * the problems described above. The CS de-assertion and Rx FIFO
	 * overflow may still happen due to a relatively slow system bus or a
	 * CPU not working fast enough, so that the write-then-read algo
	 * implemented here just won't keep up with the SPI bus data transfer.
	 * Such a situation is highly platform specific and is supposed to be
	 * fixed by manually restricting the SPI bus frequency using the
	 * dws->max_mem_freq parameter.
	 */
	local_irq_save(flags);
	preempt_disable();

	ret = dw_spi_write_then_read(dws, mem->spi);

	local_irq_restore(flags);
	preempt_enable();

	/*
	 * Wait for the operation to finish and check the controller status,
	 * but only if no run-time error has been detected so far. Otherwise
	 * waiting would be pointless, and checking the status would just
	 * print an additional error message, since any HW error flag set
	 * would stem from the error already detected on the data transfer.
	 */
	if (!ret) {
		ret = dw_spi_wait_mem_op_done(dws);
		if (!ret)
			ret = dw_spi_check_status(dws, true);
	}

	dw_spi_stop_mem_op(dws, mem->spi);

	dw_spi_free_mem_buf(dws);

	return ret;
}
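
/*
 * A hypothetical glue-layer probe sketch (names illustrative, not from this
 * file): before calling dw_spi_add_host() a platform driver may cap the
 * memory-operation bus frequency if the write-then-read loop above can't
 * keep up on its system:
 *
 *	dws->max_freq = clk_get_rate(clk);
 *	dws->max_mem_freq = 20000000U;	// assumed platform-specific limit
 *	ret = dw_spi_add_host(dev, dws);
 */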

/*
 * Initialize the default memory operations if a glue layer hasn't specified
 * custom ones. Direct mapping operations will be preserved anyway since the
 * DW SPI controller doesn't have an embedded dirmap interface. Note the
 * memory operations implemented in this driver are the best choice only for
 * the DW APB SSI controller with standard native CS functionality. If a
 * hardware vendor has fixed the automatic CS assertion/de-assertion
 * peculiarity, then it will be safer to use the normal SPI-messages-based
 * transfers implementation.
 */
static void dw_spi_init_mem_ops(struct dw_spi *dws)
{
	if (!dws->mem_ops.exec_op && !(dws->caps & DW_SPI_CAP_CS_OVERRIDE) &&
	    !dws->set_cs) {
		dws->mem_ops.adjust_op_size = dw_spi_adjust_mem_op_size;
		dws->mem_ops.supports_op = dw_spi_supports_mem_op;
		dws->mem_ops.exec_op = dw_spi_exec_mem_op;
		if (!dws->max_mem_freq)
			dws->max_mem_freq = dws->max_freq;
	}
}

/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
	struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
	struct chip_data *chip;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		u32 rx_sample_dly_ns;

		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		spi_set_ctldata(spi, chip);

		/* Get specific / default rx-sample-delay */
		if (device_property_read_u32(&spi->dev,
					     "rx-sample-delay-ns",
					     &rx_sample_dly_ns) != 0)
			/* Use default controller value */
			rx_sample_dly_ns = dws->def_rx_sample_dly_ns;
		chip->rx_sample_dly = DIV_ROUND_CLOSEST(rx_sample_dly_ns,
							NSEC_PER_SEC /
							dws->max_freq);
	}

	/*
	 * Update CR0 data each time the setup callback is invoked since
	 * the device parameters could have been changed, for instance, by
	 * the MMC SPI driver or something else.
	 */
	chip->cr0 = dw_spi_prepare_cr0(dws, spi);

	return 0;
}

static void dw_spi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	kfree(chip);
	spi_set_ctldata(spi, NULL);
}

/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct device *dev, struct dw_spi *dws)
{
	spi_reset_chip(dws);

	/*
	 * Try to detect the FIFO depth if not set by the interface driver;
	 * per the HW spec the depth can be anywhere from 2 to 256. The probe
	 * below writes ever larger values to TXFTLR until the read-back no
	 * longer matches: the first value that fails to stick equals the
	 * FIFO depth.
	 */
	if (!dws->fifo_len) {
		u32 fifo;

		for (fifo = 1; fifo < 256; fifo++) {
			dw_writel(dws, DW_SPI_TXFTLR, fifo);
			if (fifo != dw_readl(dws, DW_SPI_TXFTLR))
				break;
		}
		dw_writel(dws, DW_SPI_TXFTLR, 0);

		dws->fifo_len = (fifo == 1) ? 0 : fifo;
		dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
	}

	/*
	 * Detect the CTRLR0.DFS field size and offset by testing the lowest
	 * bits' writability. Note the DWC SSI controller also has the
	 * extended DFS, but with zero offset.
	 */
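	/*
	 * For example (illustrative): after writing 0xffffffff to CTRLR0, a
	 * read-back with CTRLR0[3:0] stuck at zero means the legacy 4-bit
	 * DFS field is absent, so the 32-bit-capable DFS field at offset 16
	 * (SPI_DFS32_OFFSET) is assumed instead.
	 */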
	if (!(dws->caps & DW_SPI_CAP_DWC_SSI)) {
		u32 cr0, tmp = dw_readl(dws, DW_SPI_CTRLR0);

		spi_enable_chip(dws, 0);
		dw_writel(dws, DW_SPI_CTRLR0, 0xffffffff);
		cr0 = dw_readl(dws, DW_SPI_CTRLR0);
		dw_writel(dws, DW_SPI_CTRLR0, tmp);
		spi_enable_chip(dws, 1);

		if (!(cr0 & SPI_DFS_MASK)) {
			dws->caps |= DW_SPI_CAP_DFS32;
			dws->dfs_offset = SPI_DFS32_OFFSET;
			dev_dbg(dev, "Detected 32-bits max data frame size\n");
		}
	} else {
		dws->caps |= DW_SPI_CAP_DFS32;
	}

	/* Enable the HW fixup for explicit CS deselect for Amazon's alpine chip */
	if (dws->caps & DW_SPI_CAP_CS_OVERRIDE)
		dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}

int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
	struct spi_controller *master;
	int ret;

	if (!dws)
		return -EINVAL;

	master = spi_alloc_master(dev, 0);
	if (!master)
		return -ENOMEM;

	dws->master = master;
	dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);

	spi_controller_set_devdata(master, dws);

	/* Basic HW init */
	spi_hw_init(dev, dws);

	ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
			  master);
	if (ret < 0 && ret != -ENOTCONN) {
		dev_err(dev, "can not get IRQ\n");
		goto err_free_master;
	}

	dw_spi_init_mem_ops(dws);

	master->use_gpio_descriptors = true;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
	if (dws->caps & DW_SPI_CAP_DFS32)
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	else
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	master->bus_num = dws->bus_num;
	master->num_chipselect = dws->num_cs;
	master->setup = dw_spi_setup;
	master->cleanup = dw_spi_cleanup;
	if (dws->set_cs)
		master->set_cs = dws->set_cs;
	else
		master->set_cs = dw_spi_set_cs;
	master->transfer_one = dw_spi_transfer_one;
	master->handle_err = dw_spi_handle_err;
	if (dws->mem_ops.exec_op)
		master->mem_ops = &dws->mem_ops;
	master->max_speed_hz = dws->max_freq;
	master->dev.of_node = dev->of_node;
	master->dev.fwnode = dev->fwnode;
	master->flags = SPI_MASTER_GPIO_SS;
	master->auto_runtime_pm = true;

	/* Get default rx sample delay */
	device_property_read_u32(dev, "rx-sample-delay-ns",
				 &dws->def_rx_sample_dly_ns);

	if (dws->dma_ops && dws->dma_ops->dma_init) {
		ret = dws->dma_ops->dma_init(dev, dws);
		if (ret) {
			dev_warn(dev, "DMA init failed\n");
		} else {
			master->can_dma = dws->dma_ops->can_dma;
			master->flags |= SPI_CONTROLLER_MUST_TX;
		}
	}

	ret = spi_register_controller(master);
	if (ret) {
		dev_err(&master->dev, "problem registering spi master\n");
		goto err_dma_exit;
	}

	dw_spi_debugfs_init(dws);
	return 0;

err_dma_exit:
	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);
	spi_enable_chip(dws, 0);
	free_irq(dws->irq, master);
err_free_master:
	spi_controller_put(master);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_add_host);

void dw_spi_remove_host(struct dw_spi *dws)
{
	dw_spi_debugfs_remove(dws);

	spi_unregister_controller(dws->master);

	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);

	spi_shutdown_chip(dws);

	free_irq(dws->irq, dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_remove_host);
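
/*
 * A hypothetical glue-layer PM sketch (names illustrative, not from this
 * file): dw_spi_suspend_host()/dw_spi_resume_host() below are intended to be
 * called from a platform driver's dev_pm_ops callbacks, e.g.:
 *
 *	static int dw_spi_plat_suspend(struct device *dev)
 *	{
 *		struct dw_spi *dws = dev_get_drvdata(dev);
 *
 *		return dw_spi_suspend_host(dws);
 *	}
 */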

int dw_spi_suspend_host(struct dw_spi *dws)
{
	int ret;

	ret = spi_controller_suspend(dws->master);
	if (ret)
		return ret;

	spi_shutdown_chip(dws);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_spi_suspend_host);

int dw_spi_resume_host(struct dw_spi *dws)
{
	spi_hw_init(&dws->master->dev, dws);
	return spi_controller_resume(dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_resume_host);

MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");