/*
 * IMG SPFI controller driver
 *
 * Copyright (C) 2007,2008,2013 Imagination Technologies Ltd.
 * Copyright (C) 2014 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>

#define SPFI_DEVICE_PARAMETER(x)		(0x00 + 0x4 * (x))
#define SPFI_DEVICE_PARAMETER_BITCLK_SHIFT	24
#define SPFI_DEVICE_PARAMETER_BITCLK_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSSETUP_SHIFT	16
#define SPFI_DEVICE_PARAMETER_CSSETUP_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSHOLD_SHIFT	8
#define SPFI_DEVICE_PARAMETER_CSHOLD_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSDELAY_SHIFT	0
#define SPFI_DEVICE_PARAMETER_CSDELAY_MASK	0xff

#define SPFI_CONTROL				0x14
#define SPFI_CONTROL_CONTINUE			BIT(12)
#define SPFI_CONTROL_SOFT_RESET			BIT(11)
#define SPFI_CONTROL_SEND_DMA			BIT(10)
#define SPFI_CONTROL_GET_DMA			BIT(9)
#define SPFI_CONTROL_SE				BIT(8)
#define SPFI_CONTROL_TMODE_SHIFT		5
#define SPFI_CONTROL_TMODE_MASK			0x7
#define SPFI_CONTROL_TMODE_SINGLE		0
#define SPFI_CONTROL_TMODE_DUAL			1
#define SPFI_CONTROL_TMODE_QUAD			2
#define SPFI_CONTROL_SPFI_EN			BIT(0)

#define SPFI_TRANSACTION			0x18
#define SPFI_TRANSACTION_TSIZE_SHIFT		16
#define SPFI_TRANSACTION_TSIZE_MASK		0xffff

#define SPFI_PORT_STATE				0x1c
#define SPFI_PORT_STATE_DEV_SEL_SHIFT		20
#define SPFI_PORT_STATE_DEV_SEL_MASK		0x7
#define SPFI_PORT_STATE_CK_POL(x)		BIT(19 - (x))
#define SPFI_PORT_STATE_CK_PHASE(x)		BIT(14 - (x))

#define SPFI_TX_32BIT_VALID_DATA		0x20
#define SPFI_TX_8BIT_VALID_DATA			0x24
#define SPFI_RX_32BIT_VALID_DATA		0x28
#define SPFI_RX_8BIT_VALID_DATA			0x2c

#define SPFI_INTERRUPT_STATUS			0x30
#define SPFI_INTERRUPT_ENABLE			0x34
#define SPFI_INTERRUPT_CLEAR			0x38
#define SPFI_INTERRUPT_IACCESS			BIT(12)
#define SPFI_INTERRUPT_GDEX8BIT			BIT(11)
#define SPFI_INTERRUPT_ALLDONETRIG		BIT(9)
#define SPFI_INTERRUPT_GDFUL			BIT(8)
#define SPFI_INTERRUPT_GDHF			BIT(7)
#define SPFI_INTERRUPT_GDEX32BIT		BIT(6)
#define SPFI_INTERRUPT_GDTRIG			BIT(5)
#define SPFI_INTERRUPT_SDFUL			BIT(3)
#define SPFI_INTERRUPT_SDHF			BIT(2)
#define SPFI_INTERRUPT_SDE			BIT(1)
#define SPFI_INTERRUPT_SDTRIG			BIT(0)

/*
 * There are four parallel FIFOs of 16 bytes each. The word buffer
 * (*_32BIT_VALID_DATA) accesses all four FIFOs at once, resulting in an
 * effective FIFO size of 64 bytes. The byte buffer (*_8BIT_VALID_DATA)
 * accesses only a single FIFO, resulting in an effective FIFO size of
 * 16 bytes.
 */
#define SPFI_32BIT_FIFO_SIZE			64
#define SPFI_8BIT_FIFO_SIZE			16
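/*
 * Per-controller driver state. The spinlock guards the tx/rx_dma_busy
 * flags, which the two DMA completion callbacks use to decide which of
 * them finalizes the current transfer.
 */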
struct img_spfi {
	struct device *dev;
	struct spi_master *master;
	spinlock_t lock;

	void __iomem *regs;
	phys_addr_t phys;
	int irq;
	struct clk *spfi_clk;
	struct clk *sys_clk;

	struct dma_chan *rx_ch;
	struct dma_chan *tx_ch;
	bool tx_dma_busy;
	bool rx_dma_busy;
};

struct img_spfi_device_data {
	bool gpio_requested;
};

static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
{
	return readl(spfi->regs + reg);
}

static inline void spfi_writel(struct img_spfi *spfi, u32 val, u32 reg)
{
	writel(val, spfi->regs + reg);
}

static inline void spfi_start(struct img_spfi *spfi)
{
	u32 val;

	val = spfi_readl(spfi, SPFI_CONTROL);
	val |= SPFI_CONTROL_SPFI_EN;
	spfi_writel(spfi, val, SPFI_CONTROL);
}

static inline void spfi_reset(struct img_spfi *spfi)
{
	spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL);
	spfi_writel(spfi, 0, SPFI_CONTROL);
}

static int spfi_wait_all_done(struct img_spfi *spfi)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(50);

	while (time_before(jiffies, timeout)) {
		u32 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);

		if (status & SPFI_INTERRUPT_ALLDONETRIG) {
			spfi_writel(spfi, SPFI_INTERRUPT_ALLDONETRIG,
				    SPFI_INTERRUPT_CLEAR);
			return 0;
		}
		cpu_relax();
	}

	dev_err(spfi->dev, "Timed out waiting for transaction to complete\n");
	spfi_reset(spfi);

	return -ETIMEDOUT;
}

static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
				     unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max / 4) {
		spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (status & SPFI_INTERRUPT_SDFUL)
			break;
		spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA);
		count++;
	}

	return count * 4;
}

static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
				    unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max) {
		spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (status & SPFI_INTERRUPT_SDFUL)
			break;
		spfi_writel(spfi, buf[count], SPFI_TX_8BIT_VALID_DATA);
		count++;
	}

	return count;
}

static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
				    unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max / 4) {
		spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
			    SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (!(status & SPFI_INTERRUPT_GDEX32BIT))
			break;
		buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
		count++;
	}

	return count * 4;
}

static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
				   unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max) {
		spfi_writel(spfi, SPFI_INTERRUPT_GDEX8BIT,
			    SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (!(status & SPFI_INTERRUPT_GDEX8BIT))
			break;
		buf[count] = spfi_readl(spfi, SPFI_RX_8BIT_VALID_DATA);
		count++;
	}

	return count;
}
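/*
 * PIO transfer path: poll the FIFO status flags, using the 32-bit buffer
 * while at least a word remains and the 8-bit buffer for the tail. The
 * timeout below is a conservative estimate: the time needed to clock out
 * xfer->len bytes (8 bits each) at xfer->speed_hz, plus 100 ms of slack.
 */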
static int img_spfi_start_pio(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	unsigned int tx_bytes = 0, rx_bytes = 0;
	const void *tx_buf = xfer->tx_buf;
	void *rx_buf = xfer->rx_buf;
	unsigned long timeout;
	int ret;

	if (tx_buf)
		tx_bytes = xfer->len;
	if (rx_buf)
		rx_bytes = xfer->len;

	spfi_start(spfi);

	timeout = jiffies +
		msecs_to_jiffies(xfer->len * 8 * 1000 / xfer->speed_hz + 100);
	while ((tx_bytes > 0 || rx_bytes > 0) &&
	       time_before(jiffies, timeout)) {
		unsigned int tx_count, rx_count;

		if (tx_bytes >= 4)
			tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
		else
			tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);

		if (rx_bytes >= 4)
			rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
		else
			rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);

		tx_buf += tx_count;
		rx_buf += rx_count;
		tx_bytes -= tx_count;
		rx_bytes -= rx_count;

		cpu_relax();
	}

	if (rx_bytes > 0 || tx_bytes > 0) {
		dev_err(spfi->dev, "PIO transfer timed out\n");
		return -ETIMEDOUT;
	}

	ret = spfi_wait_all_done(spfi);
	if (ret < 0)
		return ret;

	return 0;
}

static void img_spfi_dma_rx_cb(void *data)
{
	struct img_spfi *spfi = data;
	unsigned long flags;

	spfi_wait_all_done(spfi);

	spin_lock_irqsave(&spfi->lock, flags);
	spfi->rx_dma_busy = false;
	if (!spfi->tx_dma_busy)
		spi_finalize_current_transfer(spfi->master);
	spin_unlock_irqrestore(&spfi->lock, flags);
}

static void img_spfi_dma_tx_cb(void *data)
{
	struct img_spfi *spfi = data;
	unsigned long flags;

	spfi_wait_all_done(spfi);

	spin_lock_irqsave(&spfi->lock, flags);
	spfi->tx_dma_busy = false;
	if (!spfi->rx_dma_busy)
		spi_finalize_current_transfer(spfi->master);
	spin_unlock_irqrestore(&spfi->lock, flags);
}
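/*
 * DMA transfer path: the RX descriptor is submitted and issued before the
 * controller is enabled, and the TX descriptor only afterwards, so receive
 * data cannot arrive before a buffer is ready for it. Returning 1 tells
 * the SPI core the transfer completes asynchronously, via
 * spi_finalize_current_transfer() from whichever DMA callback runs last.
 */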
static int img_spfi_start_dma(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
	struct dma_slave_config rxconf, txconf;

	spfi->rx_dma_busy = false;
	spfi->tx_dma_busy = false;

	if (xfer->rx_buf) {
		rxconf.direction = DMA_DEV_TO_MEM;
		if (xfer->len % 4 == 0) {
			rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
			rxconf.src_addr_width = 4;
			rxconf.src_maxburst = 4;
		} else {
			rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
			rxconf.src_addr_width = 1;
			rxconf.src_maxburst = 4;
		}
		dmaengine_slave_config(spfi->rx_ch, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(spfi->rx_ch, xfer->rx_sg.sgl,
						 xfer->rx_sg.nents,
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT);
		if (!rxdesc)
			goto stop_dma;

		rxdesc->callback = img_spfi_dma_rx_cb;
		rxdesc->callback_param = spfi;
	}

	if (xfer->tx_buf) {
		txconf.direction = DMA_MEM_TO_DEV;
		if (xfer->len % 4 == 0) {
			txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
			txconf.dst_addr_width = 4;
			txconf.dst_maxburst = 4;
		} else {
			txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
			txconf.dst_addr_width = 1;
			txconf.dst_maxburst = 4;
		}
		dmaengine_slave_config(spfi->tx_ch, &txconf);

		txdesc = dmaengine_prep_slave_sg(spfi->tx_ch, xfer->tx_sg.sgl,
						 xfer->tx_sg.nents,
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
		if (!txdesc)
			goto stop_dma;

		txdesc->callback = img_spfi_dma_tx_cb;
		txdesc->callback_param = spfi;
	}

	if (xfer->rx_buf) {
		spfi->rx_dma_busy = true;
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(spfi->rx_ch);
	}

	spfi_start(spfi);

	if (xfer->tx_buf) {
		spfi->tx_dma_busy = true;
		dmaengine_submit(txdesc);
		dma_async_issue_pending(spfi->tx_ch);
	}

	return 1;

stop_dma:
	dmaengine_terminate_all(spfi->rx_ch);
	dmaengine_terminate_all(spfi->tx_ch);
	return -EIO;
}

static void img_spfi_handle_err(struct spi_master *master,
				struct spi_message *msg)
{
	struct img_spfi *spfi = spi_master_get_devdata(master);
	unsigned long flags;

	/*
	 * Stop all DMA and reset the controller if the previous transaction
	 * timed out and never completed its DMA.
	 */
	spin_lock_irqsave(&spfi->lock, flags);
	if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
		spfi->tx_dma_busy = false;
		spfi->rx_dma_busy = false;

		dmaengine_terminate_all(spfi->tx_ch);
		dmaengine_terminate_all(spfi->rx_ch);
	}
	spin_unlock_irqrestore(&spfi->lock, flags);
}

static int img_spfi_prepare(struct spi_master *master, struct spi_message *msg)
{
	struct img_spfi *spfi = spi_master_get_devdata(master);
	u32 val;

	val = spfi_readl(spfi, SPFI_PORT_STATE);
	val &= ~(SPFI_PORT_STATE_DEV_SEL_MASK <<
		 SPFI_PORT_STATE_DEV_SEL_SHIFT);
	val |= msg->spi->chip_select << SPFI_PORT_STATE_DEV_SEL_SHIFT;
	if (msg->spi->mode & SPI_CPHA)
		val |= SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
	else
		val &= ~SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
	if (msg->spi->mode & SPI_CPOL)
		val |= SPFI_PORT_STATE_CK_POL(msg->spi->chip_select);
	else
		val &= ~SPFI_PORT_STATE_CK_POL(msg->spi->chip_select);
	spfi_writel(spfi, val, SPFI_PORT_STATE);

	return 0;
}

static int img_spfi_unprepare(struct spi_master *master,
			      struct spi_message *msg)
{
	struct img_spfi *spfi = spi_master_get_devdata(master);

	spfi_reset(spfi);

	return 0;
}

static int img_spfi_setup(struct spi_device *spi)
{
	int ret = -EINVAL;
	struct img_spfi_device_data *spfi_data = spi_get_ctldata(spi);

	if (!spfi_data) {
		spfi_data = kzalloc(sizeof(*spfi_data), GFP_KERNEL);
		if (!spfi_data)
			return -ENOMEM;
		spfi_data->gpio_requested = false;
		spi_set_ctldata(spi, spfi_data);
	}
	if (!spfi_data->gpio_requested) {
		ret = gpio_request_one(spi->cs_gpio,
				       (spi->mode & SPI_CS_HIGH) ?
				       GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
				       dev_name(&spi->dev));
		if (ret)
			dev_err(&spi->dev, "can't request chipselect gpio %d\n",
				spi->cs_gpio);
		else
			spfi_data->gpio_requested = true;
	} else {
		if (gpio_is_valid(spi->cs_gpio)) {
			int mode = ((spi->mode & SPI_CS_HIGH) ?
				    GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH);

			ret = gpio_direction_output(spi->cs_gpio, mode);
			if (ret)
				dev_err(&spi->dev, "chipselect gpio %d setup failed (%d)\n",
					spi->cs_gpio, ret);
		}
	}
	return ret;
}

static void img_spfi_cleanup(struct spi_device *spi)
{
	struct img_spfi_device_data *spfi_data = spi_get_ctldata(spi);

	if (spfi_data) {
		if (spfi_data->gpio_requested)
			gpio_free(spi->cs_gpio);
		kfree(spfi_data);
		spi_set_ctldata(spi, NULL);
	}
}
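/*
 * Worked example for the bit-clock divider below (illustrative numbers,
 * not taken from the source): with spfi_clk at 40 MHz and a requested
 * speed of 1 MHz, div = DIV_ROUND_UP(40000000, 1000000) = 40, which
 * get_count_order() rounds up to the next power of two (64), giving
 * BITCLK = 512 / 64 = 8 and an output of 40 MHz * 8 / 512 = 625 kHz.
 * Rounding div up means the programmed rate never exceeds the requested
 * speed.
 */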
static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	u32 val, div;

	/*
	 * output = spfi_clk * (BITCLK / 512), where BITCLK must be a
	 * power of 2 up to 128
	 */
	div = DIV_ROUND_UP(clk_get_rate(spfi->spfi_clk), xfer->speed_hz);
	div = clamp(512 / (1 << get_count_order(div)), 1, 128);

	val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi->chip_select));
	val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK <<
		 SPFI_DEVICE_PARAMETER_BITCLK_SHIFT);
	val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT;
	spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi->chip_select));

	spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT,
		    SPFI_TRANSACTION);

	val = spfi_readl(spfi, SPFI_CONTROL);
	val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA);
	if (xfer->tx_buf)
		val |= SPFI_CONTROL_SEND_DMA;
	if (xfer->rx_buf)
		val |= SPFI_CONTROL_GET_DMA;
	val &= ~(SPFI_CONTROL_TMODE_MASK << SPFI_CONTROL_TMODE_SHIFT);
	if (xfer->tx_nbits == SPI_NBITS_DUAL &&
	    xfer->rx_nbits == SPI_NBITS_DUAL)
		val |= SPFI_CONTROL_TMODE_DUAL << SPFI_CONTROL_TMODE_SHIFT;
	else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
		 xfer->rx_nbits == SPI_NBITS_QUAD)
		val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
	val |= SPFI_CONTROL_SE;
	spfi_writel(spfi, val, SPFI_CONTROL);
}

static int img_spfi_transfer_one(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	int ret;

	if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
		dev_err(spfi->dev,
			"Transfer length (%d) is greater than the max supported (%d)",
			xfer->len, SPFI_TRANSACTION_TSIZE_MASK);
		return -EINVAL;
	}

	img_spfi_config(master, spi, xfer);
	if (master->can_dma && master->can_dma(master, spi, xfer))
		ret = img_spfi_start_dma(master, spi, xfer);
	else
		ret = img_spfi_start_pio(master, spi, xfer);

	return ret;
}

static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
			     struct spi_transfer *xfer)
{
	if (xfer->len > SPFI_32BIT_FIFO_SIZE)
		return true;
	return false;
}

static irqreturn_t img_spfi_irq(int irq, void *dev_id)
{
	struct img_spfi *spfi = (struct img_spfi *)dev_id;
	u32 status;

	status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
	if (status & SPFI_INTERRUPT_IACCESS) {
		spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_CLEAR);
		dev_err(spfi->dev, "Illegal access interrupt");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
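/*
 * Illustrative device-tree node for this controller (addresses, phandles,
 * and specifiers are made up; the property names are the ones this driver
 * and the SPI core actually read):
 *
 *	spi@18100f00 {
 *		compatible = "img,spfi";
 *		reg = <0x18100f00 0x100>;
 *		interrupts = <...>;
 *		clocks = <&clk_sys>, <&clk_spfi>;
 *		clock-names = "sys", "spfi";
 *		dmas = <&dma 9>, <&dma 10>;
 *		dma-names = "rx", "tx";
 *		spfi-max-frequency = <50000000>;
 *		img,supports-quad-mode;
 *	};
 */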
static int img_spfi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct img_spfi *spfi;
	struct resource *res;
	int ret;
	u32 max_speed_hz;

	master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
	if (!master)
		return -ENOMEM;
	platform_set_drvdata(pdev, master);

	spfi = spi_master_get_devdata(master);
	spfi->dev = &pdev->dev;
	spfi->master = master;
	spin_lock_init(&spfi->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spfi->regs = devm_ioremap_resource(spfi->dev, res);
	if (IS_ERR(spfi->regs)) {
		ret = PTR_ERR(spfi->regs);
		goto put_spi;
	}
	spfi->phys = res->start;

	spfi->irq = platform_get_irq(pdev, 0);
	if (spfi->irq < 0) {
		ret = spfi->irq;
		goto put_spi;
	}
	ret = devm_request_irq(spfi->dev, spfi->irq, img_spfi_irq,
			       IRQ_TYPE_LEVEL_HIGH, dev_name(spfi->dev), spfi);
	if (ret)
		goto put_spi;

	spfi->sys_clk = devm_clk_get(spfi->dev, "sys");
	if (IS_ERR(spfi->sys_clk)) {
		ret = PTR_ERR(spfi->sys_clk);
		goto put_spi;
	}
	spfi->spfi_clk = devm_clk_get(spfi->dev, "spfi");
	if (IS_ERR(spfi->spfi_clk)) {
		ret = PTR_ERR(spfi->spfi_clk);
		goto put_spi;
	}

	ret = clk_prepare_enable(spfi->sys_clk);
	if (ret)
		goto put_spi;
	ret = clk_prepare_enable(spfi->spfi_clk);
	if (ret)
		goto disable_pclk;

	spfi_reset(spfi);
	/*
	 * Only enable the error (IACCESS) interrupt. In PIO mode we'll
	 * poll the status of the FIFOs.
	 */
	spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);

	master->auto_runtime_pm = true;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
	if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
		master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
	master->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
	master->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;

	/*
	 * The maximum speed supported by SPFI is limited to the lower of
	 * 1/4 of the SPFI clock and the "spfi-max-frequency" property
	 * defined in the device tree. If no value is defined in the device
	 * tree, assume the maximum supported speed is 1/4 of the SPFI
	 * clock.
	 */
	if (!of_property_read_u32(spfi->dev->of_node, "spfi-max-frequency",
				  &max_speed_hz)) {
		if (master->max_speed_hz > max_speed_hz)
			master->max_speed_hz = max_speed_hz;
	}

	master->setup = img_spfi_setup;
	master->cleanup = img_spfi_cleanup;
	master->transfer_one = img_spfi_transfer_one;
	master->prepare_message = img_spfi_prepare;
	master->unprepare_message = img_spfi_unprepare;
	master->handle_err = img_spfi_handle_err;
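	/*
	 * DMA channels are optional: if either one is missing, the driver
	 * simply falls back to PIO for all transfers.
	 */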
	spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
	spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
	if (!spfi->tx_ch || !spfi->rx_ch) {
		if (spfi->tx_ch)
			dma_release_channel(spfi->tx_ch);
		if (spfi->rx_ch)
			dma_release_channel(spfi->rx_ch);
		/* Clear the pointers to avoid a double release later. */
		spfi->tx_ch = NULL;
		spfi->rx_ch = NULL;
		dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
	} else {
		master->dma_tx = spfi->tx_ch;
		master->dma_rx = spfi->rx_ch;
		master->can_dma = img_spfi_can_dma;
	}

	pm_runtime_set_active(spfi->dev);
	pm_runtime_enable(spfi->dev);

	ret = devm_spi_register_master(spfi->dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(spfi->dev);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);
	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	clk_disable_unprepare(spfi->spfi_clk);
disable_pclk:
	clk_disable_unprepare(spfi->sys_clk);
put_spi:
	spi_master_put(master);

	return ret;
}

static int img_spfi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct img_spfi *spfi = spi_master_get_devdata(master);

	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);

	pm_runtime_disable(spfi->dev);
	if (!pm_runtime_status_suspended(spfi->dev)) {
		clk_disable_unprepare(spfi->spfi_clk);
		clk_disable_unprepare(spfi->sys_clk);
	}

	return 0;
}

#ifdef CONFIG_PM
static int img_spfi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);

	clk_disable_unprepare(spfi->spfi_clk);
	clk_disable_unprepare(spfi->sys_clk);

	return 0;
}

static int img_spfi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(spfi->sys_clk);
	if (ret)
		return ret;
	ret = clk_prepare_enable(spfi->spfi_clk);
	if (ret) {
		clk_disable_unprepare(spfi->sys_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int img_spfi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_suspend(master);
}

static int img_spfi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);
	int ret;

	/*
	 * pm_runtime_get_sync() returns a positive value when the device
	 * was already active, so only treat negative values as errors, and
	 * drop the usage count we just took on failure.
	 */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}
	spfi_reset(spfi);
	pm_runtime_put(dev);

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */
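/*
 * Runtime PM gates the two clocks; system sleep additionally quiesces the
 * SPI core and resets the controller on resume via the hooks above.
 */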
static const struct dev_pm_ops img_spfi_pm_ops = {
	SET_RUNTIME_PM_OPS(img_spfi_runtime_suspend, img_spfi_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(img_spfi_suspend, img_spfi_resume)
};

static const struct of_device_id img_spfi_of_match[] = {
	{ .compatible = "img,spfi", },
	{ },
};
MODULE_DEVICE_TABLE(of, img_spfi_of_match);

static struct platform_driver img_spfi_driver = {
	.driver = {
		.name = "img-spfi",
		.pm = &img_spfi_pm_ops,
		.of_match_table = of_match_ptr(img_spfi_of_match),
	},
	.probe = img_spfi_probe,
	.remove = img_spfi_remove,
};
module_platform_driver(img_spfi_driver);

MODULE_DESCRIPTION("IMG SPFI controller driver");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_LICENSE("GPL v2");