/*
 * Copyright (C) 2009 Samsung Electronics Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

#include <linux/platform_data/spi-s3c64xx.h>

#define MAX_SPI_PORTS		3
#define S3C64XX_SPI_QUIRK_POLL	(1 << 0)

/* Registers and bit-fields */

#define S3C64XX_SPI_CH_CFG		0x00
#define S3C64XX_SPI_CLK_CFG		0x04
#define S3C64XX_SPI_MODE_CFG		0x08
#define S3C64XX_SPI_SLAVE_SEL		0x0C
#define S3C64XX_SPI_INT_EN		0x10
#define S3C64XX_SPI_STATUS		0x14
#define S3C64XX_SPI_TX_DATA		0x18
#define S3C64XX_SPI_RX_DATA		0x1C
#define S3C64XX_SPI_PACKET_CNT		0x20
#define S3C64XX_SPI_PENDING_CLR		0x24
#define S3C64XX_SPI_SWAP_CFG		0x28
#define S3C64XX_SPI_FB_CLK		0x2C

#define S3C64XX_SPI_CH_HS_EN		(1<<6)	/* High Speed Enable */
#define S3C64XX_SPI_CH_SW_RST		(1<<5)
#define S3C64XX_SPI_CH_SLAVE		(1<<4)
#define S3C64XX_SPI_CPOL_L		(1<<3)
#define S3C64XX_SPI_CPHA_B		(1<<2)
#define S3C64XX_SPI_CH_RXCH_ON		(1<<1)
#define S3C64XX_SPI_CH_TXCH_ON		(1<<0)

#define S3C64XX_SPI_CLKSEL_SRCMSK	(3<<9)
#define S3C64XX_SPI_CLKSEL_SRCSHFT	9
#define S3C64XX_SPI_ENCLK_ENABLE	(1<<8)
#define S3C64XX_SPI_PSR_MASK		0xff

#define S3C64XX_SPI_MODE_CH_TSZ_BYTE		(0<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD	(1<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_WORD		(2<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_MASK		(3<<29)
#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE		(0<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD	(1<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_WORD		(2<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_MASK		(3<<17)
#define S3C64XX_SPI_MODE_RXDMA_ON		(1<<2)
#define S3C64XX_SPI_MODE_TXDMA_ON		(1<<1)
#define S3C64XX_SPI_MODE_4BURST			(1<<0)

#define S3C64XX_SPI_SLAVE_AUTO			(1<<1)
#define S3C64XX_SPI_SLAVE_SIG_INACT		(1<<0)

#define S3C64XX_SPI_INT_TRAILING_EN		(1<<6)
#define S3C64XX_SPI_INT_RX_OVERRUN_EN		(1<<5)
#define S3C64XX_SPI_INT_RX_UNDERRUN_EN		(1<<4)
#define S3C64XX_SPI_INT_TX_OVERRUN_EN		(1<<3)
#define S3C64XX_SPI_INT_TX_UNDERRUN_EN		(1<<2)
#define S3C64XX_SPI_INT_RX_FIFORDY_EN		(1<<1)
#define S3C64XX_SPI_INT_TX_FIFORDY_EN		(1<<0)

#define S3C64XX_SPI_ST_RX_OVERRUN_ERR		(1<<5)
#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR		(1<<4)
#define S3C64XX_SPI_ST_TX_OVERRUN_ERR		(1<<3)
#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR		(1<<2)
#define S3C64XX_SPI_ST_RX_FIFORDY		(1<<1)
#define S3C64XX_SPI_ST_TX_FIFORDY		(1<<0)
#define S3C64XX_SPI_PACKET_CNT_EN		(1<<16)

#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR		(1<<4)
#define S3C64XX_SPI_PND_TX_OVERRUN_CLR		(1<<3)
#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR		(1<<2)
#define S3C64XX_SPI_PND_RX_OVERRUN_CLR		(1<<1)
#define S3C64XX_SPI_PND_TRAILING_CLR		(1<<0)

#define S3C64XX_SPI_SWAP_RX_HALF_WORD		(1<<7)
#define S3C64XX_SPI_SWAP_RX_BYTE		(1<<6)
#define S3C64XX_SPI_SWAP_RX_BIT			(1<<5)
#define S3C64XX_SPI_SWAP_RX_EN			(1<<4)
#define S3C64XX_SPI_SWAP_TX_HALF_WORD		(1<<3)
#define S3C64XX_SPI_SWAP_TX_BYTE		(1<<2)
#define S3C64XX_SPI_SWAP_TX_BIT			(1<<1)
#define S3C64XX_SPI_SWAP_TX_EN			(1<<0)

#define S3C64XX_SPI_FBCLK_MSK			(3<<0)

#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
				(1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
					FIFO_LVL_MASK(i))

#define S3C64XX_SPI_MAX_TRAILCNT	0x3ff
#define S3C64XX_SPI_TRAILCNT_OFF	19

#define S3C64XX_SPI_TRAILCNT		S3C64XX_SPI_MAX_TRAILCNT

#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
#define is_polling(x)	(x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)

#define RXBUSY    (1<<2)
#define TXBUSY    (1<<3)

struct s3c64xx_spi_dma_data {
	struct dma_chan *ch;
	enum dma_transfer_direction direction;
	unsigned int dmach;
};

/**
 * struct s3c64xx_spi_port_config - SPI Controller hardware info
 * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
 * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
 * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
 * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
 * @clk_from_cmu: True, if the controller does not include a clock mux and
 *	prescaler unit.
 *
 * The Samsung s3c64xx SPI controllers are used on various Samsung SoCs but
 * differ in some aspects, such as the size of the FIFO and the SPI bus clock
 * setup. Such differences are specified to the driver using this structure,
 * which is provided as driver data to the driver.
 */
struct s3c64xx_spi_port_config {
	int	fifo_lvl_mask[MAX_SPI_PORTS];
	int	rx_lvl_offset;
	int	tx_st_done;
	int	quirks;
	bool	high_speed;
	bool	clk_from_cmu;
};

/**
 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
 * @clk: Pointer to the spi clock.
 * @src_clk: Pointer to the clock used to generate SPI signals.
 * @master: Pointer to the SPI Protocol master.
 * @cntrlr_info: Platform specific data for the controller this driver manages.
 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
 * @lock: Controller specific lock.
 * @state: Set of FLAGS to indicate status.
 * @rx_dma: Controller's DMA channel info for Rx.
 * @tx_dma: Controller's DMA channel info for Tx.
 * @sfr_start: BUS address of SPI controller regs.
 * @regs: Pointer to ioremap'ed controller registers.
 * @irq: interrupt
 * @xfer_completion: To indicate completion of xfer task.
 * @cur_mode: Stores the active configuration of the controller.
 * @cur_bpw: Stores the active bits per word settings.
 * @cur_speed: Stores the active xfer clock speed.
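 * @pdev: Pointer to the owning platform device.
 * @port_conf: Per-SoC port configuration selected at probe time.
 * @port_id: Port index (DT alias id or platform device id) of this instance.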
 */
struct s3c64xx_spi_driver_data {
	void __iomem            *regs;
	struct clk              *clk;
	struct clk              *src_clk;
	struct platform_device  *pdev;
	struct spi_master       *master;
	struct s3c64xx_spi_info *cntrlr_info;
	struct spi_device       *tgl_spi;
	spinlock_t              lock;
	unsigned long           sfr_start;
	struct completion       xfer_completion;
	unsigned                state;
	unsigned                cur_mode, cur_bpw;
	unsigned                cur_speed;
	struct s3c64xx_spi_dma_data	rx_dma;
	struct s3c64xx_spi_dma_data	tx_dma;
	struct s3c64xx_spi_port_config	*port_conf;
	unsigned int			port_id;
};

static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	unsigned long loops;
	u32 val;

	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val |= S3C64XX_SPI_CH_SW_RST;
	val &= ~S3C64XX_SPI_CH_HS_EN;
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Flush TxFIFO */
	loops = msecs_to_loops(1);
	do {
		val = readl(regs + S3C64XX_SPI_STATUS);
	} while (TX_FIFO_LVL(val, sdd) && loops--);

	if (loops == 0)
		dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");

	/* Flush RxFIFO */
	loops = msecs_to_loops(1);
	do {
		val = readl(regs + S3C64XX_SPI_STATUS);
		if (RX_FIFO_LVL(val, sdd))
			readl(regs + S3C64XX_SPI_RX_DATA);
		else
			break;
	} while (loops--);

	if (loops == 0)
		dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");

	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~S3C64XX_SPI_CH_SW_RST;
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);
}

static void s3c64xx_spi_dmacb(void *data)
{
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_dma_data *dma = data;
	unsigned long flags;

	if (dma->direction == DMA_DEV_TO_MEM)
		sdd = container_of(data,
			struct s3c64xx_spi_driver_data, rx_dma);
	else
		sdd = container_of(data,
			struct s3c64xx_spi_driver_data, tx_dma);

	spin_lock_irqsave(&sdd->lock, flags);

	if (dma->direction == DMA_DEV_TO_MEM) {
		sdd->state &= ~RXBUSY;
		if (!(sdd->state & TXBUSY))
			complete(&sdd->xfer_completion);
	} else {
		sdd->state &= ~TXBUSY;
		if (!(sdd->state & RXBUSY))
			complete(&sdd->xfer_completion);
	}

	spin_unlock_irqrestore(&sdd->lock, flags);
}

static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
			struct sg_table *sgt)
{
	struct s3c64xx_spi_driver_data *sdd;
	struct dma_slave_config config;
	struct dma_async_tx_descriptor *desc;

	memset(&config, 0, sizeof(config));

	if (dma->direction == DMA_DEV_TO_MEM) {
		sdd = container_of((void *)dma,
			struct s3c64xx_spi_driver_data, rx_dma);
		config.direction = dma->direction;
		config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
		config.src_addr_width = sdd->cur_bpw / 8;
		config.src_maxburst = 1;
		dmaengine_slave_config(dma->ch, &config);
	} else {
		sdd = container_of((void *)dma,
			struct s3c64xx_spi_driver_data, tx_dma);
		config.direction = dma->direction;
		config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
		config.dst_addr_width = sdd->cur_bpw / 8;
		config.dst_maxburst = 1;
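		/* Hand the Tx slave parameters to the DMA engine before the
		 * descriptor is prepared.
		 */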
		dmaengine_slave_config(dma->ch, &config);
	}

	desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
				       dma->direction, DMA_PREP_INTERRUPT);

	desc->callback = s3c64xx_spi_dmacb;
	desc->callback_param = dma;

	dmaengine_submit(desc);
	dma_async_issue_pending(dma->ch);
}

static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
	dma_filter_fn filter = sdd->cntrlr_info->filter;
	struct device *dev = &sdd->pdev->dev;
	dma_cap_mask_t mask;
	int ret;

	if (!is_polling(sdd)) {
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* Acquire DMA channels */
		sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
				   (void *)sdd->rx_dma.dmach, dev, "rx");
		if (!sdd->rx_dma.ch) {
			dev_err(dev, "Failed to get RX DMA channel\n");
			ret = -EBUSY;
			goto out;
		}
		spi->dma_rx = sdd->rx_dma.ch;

		sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
				   (void *)sdd->tx_dma.dmach, dev, "tx");
		if (!sdd->tx_dma.ch) {
			dev_err(dev, "Failed to get TX DMA channel\n");
			ret = -EBUSY;
			goto out_rx;
		}
		spi->dma_tx = sdd->tx_dma.ch;
	}

	ret = pm_runtime_get_sync(&sdd->pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable device: %d\n", ret);
		goto out_tx;
	}

	return 0;

out_tx:
	dma_release_channel(sdd->tx_dma.ch);
out_rx:
	dma_release_channel(sdd->rx_dma.ch);
out:
	return ret;
}

static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);

	/* Free DMA channels */
	if (!is_polling(sdd)) {
		dma_release_channel(sdd->rx_dma.ch);
		dma_release_channel(sdd->tx_dma.ch);
	}

	pm_runtime_put(&sdd->pdev->dev);
	return 0;
}

static bool s3c64xx_spi_can_dma(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
}

static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
			    struct spi_device *spi,
			    struct spi_transfer *xfer, int dma_mode)
{
	void __iomem *regs = sdd->regs;
	u32 modecfg, chcfg;

	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
	modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);

	chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
	chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;

	if (dma_mode) {
		chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
	} else {
		/* Always shift data into the FIFO, even if the xfer is
		 * Tx only; this helps set the PCKT_CNT value so that
		 * exactly the needed number of clocks is generated.
		 */
		chcfg |= S3C64XX_SPI_CH_RXCH_ON;
		writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
	}

	if (xfer->tx_buf != NULL) {
		sdd->state |= TXBUSY;
		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
			prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
		} else {
			switch (sdd->cur_bpw) {
			case 32:
				iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 4);
				break;
			case 16:
				iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 2);
				break;
			default:
				iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len);
				break;
			}
		}
	}

	if (xfer->rx_buf != NULL) {
		sdd->state |= RXBUSY;

		if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
					&& !(sdd->cur_mode & SPI_CPHA))
			chcfg |= S3C64XX_SPI_CH_HS_EN;

		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
			chcfg |= S3C64XX_SPI_CH_RXCH_ON;
			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
			prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
		}
	}

	writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
}

static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
					int timeout_ms)
{
	void __iomem *regs = sdd->regs;
	unsigned long val = 1;
	u32 status;

	/* max fifo depth available */
	u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;

	if (timeout_ms)
		val = msecs_to_loops(timeout_ms);

	do {
		status = readl(regs + S3C64XX_SPI_STATUS);
	} while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);

	/* return the actual received data length */
	return RX_FIFO_LVL(status, sdd);
}

static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
			struct spi_transfer *xfer)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	u32 status;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 10; /* some tolerance */

	val = msecs_to_jiffies(ms) + 10;
	val = wait_for_completion_timeout(&sdd->xfer_completion, val);

	/*
	 * If the previous xfer was completed within the timeout, proceed
	 * further; else return -EIO.
	 * DmaTx returns after simply writing data into the FIFO, without
	 * waiting for the real transmission on the bus to finish.
	 * DmaRx returns only after the DMA has read data from the FIFO,
	 * which needs the bus transmission to finish, so we don't worry
	 * if the xfer involved Rx (with or without Tx).
	 */
	if (val && !xfer->rx_buf) {
		val = msecs_to_loops(10);
		status = readl(regs + S3C64XX_SPI_STATUS);
		while ((TX_FIFO_LVL(status, sdd)
			|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
		       && --val) {
			cpu_relax();
			status = readl(regs + S3C64XX_SPI_STATUS);
		}

	}

	/* If timed out while checking rx/tx status return error */
	if (!val)
		return -EIO;

	return 0;
}

static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
			struct spi_transfer *xfer)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	u32 status;
	int loops;
	u32 cpy_len;
	u8 *buf;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 10; /* some tolerance */

	val = msecs_to_loops(ms);
	do {
		status = readl(regs + S3C64XX_SPI_STATUS);
	} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);

	/* If it was only Tx */
	if (!xfer->rx_buf) {
		sdd->state &= ~TXBUSY;
		return 0;
	}

	/*
	 * If the receive length is bigger than the controller fifo
	 * size, calculate the loops and read the fifo as many times.
	 * loops = length / max fifo size (calculated by using the
	 * fifo mask).
	 * For any size less than the fifo size the below code is
	 * executed at least once.
	 */
	loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
	buf = xfer->rx_buf;
	do {
		/* wait for data to be received in the fifo */
		cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
						       (loops ? ms : 0));

		switch (sdd->cur_bpw) {
		case 32:
			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
				     buf, cpy_len / 4);
			break;
		case 16:
			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
				     buf, cpy_len / 2);
			break;
		default:
			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
				    buf, cpy_len);
			break;
		}

		buf = buf + cpy_len;
	} while (loops--);
	sdd->state &= ~RXBUSY;

	return 0;
}

static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	u32 val;

	/* Disable Clock */
	if (sdd->port_conf->clk_from_cmu) {
		clk_disable_unprepare(sdd->src_clk);
	} else {
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	/* Set Polarity and Phase */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_SLAVE |
			S3C64XX_SPI_CPOL_L |
			S3C64XX_SPI_CPHA_B);

	if (sdd->cur_mode & SPI_CPOL)
		val |= S3C64XX_SPI_CPOL_L;

	if (sdd->cur_mode & SPI_CPHA)
		val |= S3C64XX_SPI_CPHA_B;

	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Set Channel & DMA Mode */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
			| S3C64XX_SPI_MODE_CH_TSZ_MASK);

	switch (sdd->cur_bpw) {
	case 32:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
		break;
	case 16:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
		break;
	default:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
		val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
		break;
	}

	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	if (sdd->port_conf->clk_from_cmu) {
		/* Configure Clock */
		/* There is half-multiplier before the SPI */
		clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
		/* Enable Clock */
		clk_prepare_enable(sdd->src_clk);
	} else {
		/* Configure Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_PSR_MASK;
		val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
				& S3C64XX_SPI_PSR_MASK);
		writel(val, regs + S3C64XX_SPI_CLK_CFG);

		/* Enable Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val |= S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}
}

#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)

static int s3c64xx_spi_prepare_message(struct spi_master *master,
				       struct spi_message *msg)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;

	/* If the Master's (controller) state differs from that needed by the Slave */
	if (sdd->cur_speed != spi->max_speed_hz
			|| sdd->cur_mode != spi->mode
			|| sdd->cur_bpw != spi->bits_per_word) {
		sdd->cur_bpw = spi->bits_per_word;
		sdd->cur_speed = spi->max_speed_hz;
		sdd->cur_mode = spi->mode;
		s3c64xx_spi_config(sdd);
	}

	/* Configure feedback delay */
	writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);

	return 0;
}

static int s3c64xx_spi_transfer_one(struct spi_master *master,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	int status;
	u32 speed;
	u8 bpw;
	unsigned long flags;
	int use_dma;

	reinit_completion(&sdd->xfer_completion);

	/* Only BPW and Speed may change across transfers */
	bpw = xfer->bits_per_word;
	speed = xfer->speed_hz ? : spi->max_speed_hz;

	if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
		sdd->cur_bpw = bpw;
		sdd->cur_speed = speed;
		s3c64xx_spi_config(sdd);
	}

	/* Polling method for xfers not bigger than FIFO capacity */
	use_dma = 0;
	if (!is_polling(sdd) &&
	    (sdd->rx_dma.ch && sdd->tx_dma.ch &&
	     (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
		use_dma = 1;

	spin_lock_irqsave(&sdd->lock, flags);

	/* Pending only which is to be done */
	sdd->state &= ~RXBUSY;
	sdd->state &= ~TXBUSY;

	enable_datapath(sdd, spi, xfer, use_dma);

	/* Start the signals */
	writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	spin_unlock_irqrestore(&sdd->lock, flags);

	if (use_dma)
		status = wait_for_dma(sdd, xfer);
	else
		status = wait_for_pio(sdd, xfer);

	if (status) {
		dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
			xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
			(sdd->state & RXBUSY) ? 'f' : 'p',
			(sdd->state & TXBUSY) ? 'f' : 'p',
			xfer->len);

		if (use_dma) {
			if (xfer->tx_buf != NULL
			    && (sdd->state & TXBUSY))
				dmaengine_terminate_all(sdd->tx_dma.ch);
			if (xfer->rx_buf != NULL
			    && (sdd->state & RXBUSY))
				dmaengine_terminate_all(sdd->rx_dma.ch);
		}
	} else {
		flush_fifo(sdd);
	}

	return status;
}

static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
				struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs;
	struct device_node *slave_np, *data_np = NULL;
	u32 fb_delay = 0;

	slave_np = spi->dev.of_node;
	if (!slave_np) {
		dev_err(&spi->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	data_np = of_get_child_by_name(slave_np, "controller-data");
	if (!data_np) {
		dev_err(&spi->dev, "child node 'controller-data' not found\n");
		return ERR_PTR(-EINVAL);
	}

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs) {
		of_node_put(data_np);
		return ERR_PTR(-ENOMEM);
	}

	of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
	cs->fb_delay = fb_delay;
	of_node_put(data_np);
	return cs;
}

/*
 * Here we only check the validity of the requested configuration
 * and save it in a local data structure. The controller is actually
 * configured only just before we get a message to transfer.
 */
static int s3c64xx_spi_setup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_info *sci;
	int err;

	sdd = spi_master_get_devdata(spi->master);
	if (spi->dev.of_node) {
		cs = s3c64xx_get_slave_ctrldata(spi);
		spi->controller_data = cs;
	} else if (cs) {
		/* On non-DT platforms the SPI core will set spi->cs_gpio
		 * to -ENOENT. The GPIO pin used to drive the chip select
		 * is defined by using platform data, so the spi->cs_gpio
		 * value has to be overridden with the proper GPIO pin number.
		 */
		spi->cs_gpio = cs->line;
	}

	if (IS_ERR_OR_NULL(cs)) {
		dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
		return -ENODEV;
	}

	if (!spi_get_ctldata(spi)) {
		if (gpio_is_valid(spi->cs_gpio)) {
			err = gpio_request_one(spi->cs_gpio, GPIOF_OUT_INIT_HIGH,
					       dev_name(&spi->dev));
			if (err) {
				dev_err(&spi->dev,
					"Failed to get /CS gpio [%d]: %d\n",
					spi->cs_gpio, err);
				goto err_gpio_req;
			}
		}

		spi_set_ctldata(spi, cs);
	}

	sci = sdd->cntrlr_info;

	pm_runtime_get_sync(&sdd->pdev->dev);

	/* Check if we can provide the requested rate */
	if (!sdd->port_conf->clk_from_cmu) {
		u32 psr, speed;

		/* Max possible */
		speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);

		if (spi->max_speed_hz > speed)
			spi->max_speed_hz = speed;

		psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
		psr &= S3C64XX_SPI_PSR_MASK;
		if (psr == S3C64XX_SPI_PSR_MASK)
			psr--;

		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz < speed) {
			if (psr+1 < S3C64XX_SPI_PSR_MASK) {
				psr++;
			} else {
				err = -EINVAL;
				goto setup_exit;
			}
		}

		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz >= speed) {
			spi->max_speed_hz = speed;
		} else {
			dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
				spi->max_speed_hz);
			err = -EINVAL;
			goto setup_exit;
		}
	}

	pm_runtime_put(&sdd->pdev->dev);
	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
	return 0;

setup_exit:
	pm_runtime_put(&sdd->pdev->dev);
	/* setup() returns with device de-selected */
	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	if (gpio_is_valid(spi->cs_gpio))
		gpio_free(spi->cs_gpio);
	spi_set_ctldata(spi, NULL);

err_gpio_req:
	if (spi->dev.of_node)
		kfree(cs);

	return err;
}

static void s3c64xx_spi_cleanup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);

	if (gpio_is_valid(spi->cs_gpio)) {
		gpio_free(spi->cs_gpio);
		if (spi->dev.of_node)
			kfree(cs);
		else {
			/* On non-DT platforms, the SPI core sets
			 * spi->cs_gpio to -ENOENT and .setup()
			 * overrides it with the GPIO pin value
			 * passed using platform data.
			 */
			spi->cs_gpio = -ENOENT;
		}
	}

	spi_set_ctldata(spi, NULL);
}

static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
{
	struct s3c64xx_spi_driver_data *sdd = data;
	struct spi_master *spi = sdd->master;
	unsigned int val, clr = 0;

	val = readl(sdd->regs + S3C64XX_SPI_STATUS);

	if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
		clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
		dev_err(&spi->dev, "RX overrun\n");
	}
	if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
		dev_err(&spi->dev, "RX underrun\n");
	}
	if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
		dev_err(&spi->dev, "TX overrun\n");
	}
	if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
		dev_err(&spi->dev, "TX underrun\n");
	}

	/* Clear the pending irq by setting and then clearing it */
	writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);

	return IRQ_HANDLED;
}

static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
{
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	void __iomem *regs = sdd->regs;
	unsigned int val;

	sdd->cur_speed = 0;

	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	/* Disable Interrupts - we use Polling if not DMA mode */
	writel(0, regs + S3C64XX_SPI_INT_EN);

	if (!sdd->port_conf->clk_from_cmu)
		writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
				regs + S3C64XX_SPI_CLK_CFG);
	writel(0, regs + S3C64XX_SPI_MODE_CFG);
	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	/* Clear any irq pending bits, should set and clear the bits */
	val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
		S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
		S3C64XX_SPI_PND_TX_OVERRUN_CLR |
		S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
	writel(val, regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, regs + S3C64XX_SPI_PENDING_CLR);

	writel(0, regs + S3C64XX_SPI_SWAP_CFG);

	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~S3C64XX_SPI_MODE_4BURST;
	val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	flush_fifo(sdd);
}

#ifdef CONFIG_OF
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
	struct s3c64xx_spi_info *sci;
	u32 temp;

	sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return ERR_PTR(-ENOMEM);

	if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
		dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
		sci->src_clk_nr = 0;
	} else {
		sci->src_clk_nr = temp;
	}

	if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
		dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
		sci->num_cs = 1;
	} else {
		sci->num_cs = temp;
	}

	return sci;
}
#else
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
	return dev_get_platdata(dev);
}
#endif

static const struct of_device_id s3c64xx_spi_dt_match[];

static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
					struct platform_device *pdev)
{
#ifdef CONFIG_OF
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node);
		return (struct s3c64xx_spi_port_config *)match->data;
	}
#endif
	return (struct s3c64xx_spi_port_config *)
			 platform_get_device_id(pdev)->driver_data;
}

static int s3c64xx_spi_probe(struct platform_device *pdev)
{
	struct resource	*mem_res;
	struct resource	*res;
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
	struct spi_master *master;
	int ret, irq;
	char clk_name[16];

	if (!sci && pdev->dev.of_node) {
		sci = s3c64xx_spi_parse_dt(&pdev->dev);
		if (IS_ERR(sci))
			return PTR_ERR(sci);
	}

	if (!sci) {
		dev_err(&pdev->dev, "platform_data missing!\n");
		return -ENODEV;
	}

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (mem_res == NULL) {
		dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
		return -ENXIO;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
		return irq;
	}

	master = spi_alloc_master(&pdev->dev,
				sizeof(struct s3c64xx_spi_driver_data));
	if (master == NULL) {
		dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, master);

	sdd = spi_master_get_devdata(master);
	sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
	sdd->master = master;
	sdd->cntrlr_info = sci;
	sdd->pdev = pdev;
	sdd->sfr_start = mem_res->start;
	if (pdev->dev.of_node) {
		ret = of_alias_get_id(pdev->dev.of_node, "spi");
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
				ret);
			goto err0;
		}
		sdd->port_id = ret;
	} else {
		sdd->port_id = pdev->id;
	}

	sdd->cur_bpw = 8;

	if (!sdd->pdev->dev.of_node) {
		res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		if (!res) {
			dev_warn(&pdev->dev, "Unable to get SPI tx dma resource. Switching to poll mode\n");
			sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
		} else
			sdd->tx_dma.dmach = res->start;

		res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
		if (!res) {
			dev_warn(&pdev->dev, "Unable to get SPI rx dma resource. Switching to poll mode\n");
			sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
		} else
			sdd->rx_dma.dmach = res->start;
	}

	sdd->tx_dma.direction = DMA_MEM_TO_DEV;
	sdd->rx_dma.direction = DMA_DEV_TO_MEM;

	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = sdd->port_id;
	master->setup = s3c64xx_spi_setup;
	master->cleanup = s3c64xx_spi_cleanup;
	master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
	master->prepare_message = s3c64xx_spi_prepare_message;
	master->transfer_one = s3c64xx_spi_transfer_one;
	master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
	master->num_chipselect = sci->num_cs;
	master->dma_alignment = 8;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
					SPI_BPW_MASK(8);
	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->auto_runtime_pm = true;
	if (!is_polling(sdd))
		master->can_dma = s3c64xx_spi_can_dma;

	sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
	if (IS_ERR(sdd->regs)) {
		ret = PTR_ERR(sdd->regs);
		goto err0;
	}

	if (sci->cfg_gpio && sci->cfg_gpio()) {
		dev_err(&pdev->dev, "Unable to config gpio\n");
		ret = -EBUSY;
		goto err0;
	}

	/* Setup clocks */
	sdd->clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(sdd->clk)) {
		dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
		ret = PTR_ERR(sdd->clk);
		goto err0;
	}

	if (clk_prepare_enable(sdd->clk)) {
		dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
		ret = -EBUSY;
		goto err0;
	}

	sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
	sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
	if (IS_ERR(sdd->src_clk)) {
		dev_err(&pdev->dev,
			"Unable to acquire clock '%s'\n", clk_name);
		ret = PTR_ERR(sdd->src_clk);
		goto err2;
	}

	if (clk_prepare_enable(sdd->src_clk)) {
		dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
		ret = -EBUSY;
		goto err2;
	}

	/* Set up default mode */
	s3c64xx_spi_hwinit(sdd, sdd->port_id);

	spin_lock_init(&sdd->lock);
	init_completion(&sdd->xfer_completion);

	ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
				"spi-s3c64xx", sdd);
	if (ret != 0) {
		dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
			irq, ret);
		goto err3;
	}

	writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
	       S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
	       sdd->regs + S3C64XX_SPI_INT_EN);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret != 0) {
		dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
		goto err3;
	}

	dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
					sdd->port_id, master->num_chipselect);
	dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tDMA=[Rx-%d, Tx-%d]\n",
					mem_res,
					sdd->rx_dma.dmach, sdd->tx_dma.dmach);

	return 0;

err3:
	clk_disable_unprepare(sdd->src_clk);
err2:
	clk_disable_unprepare(sdd->clk);
err0:
	spi_master_put(master);

	return ret;
}

static int s3c64xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	writel(0, sdd->regs + S3C64XX_SPI_INT_EN);

	clk_disable_unprepare(sdd->src_clk);

	clk_disable_unprepare(sdd->clk);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int s3c64xx_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	int ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(sdd->clk);
		clk_disable_unprepare(sdd->src_clk);
	}

	sdd->cur_speed = 0; /* Output Clock is stopped */

	return 0;
}

static int s3c64xx_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;

	if (sci->cfg_gpio)
		sci->cfg_gpio();

	if (!pm_runtime_suspended(dev)) {
		clk_prepare_enable(sdd->src_clk);
		clk_prepare_enable(sdd->clk);
	}

	s3c64xx_spi_hwinit(sdd, sdd->port_id);

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_RUNTIME
static int s3c64xx_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	clk_disable_unprepare(sdd->clk);
	clk_disable_unprepare(sdd->src_clk);

	return 0;
}

static int s3c64xx_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(sdd->src_clk);
	if (ret != 0)
		return ret;

	ret = clk_prepare_enable(sdd->clk);
	if (ret != 0) {
		clk_disable_unprepare(sdd->src_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops s3c64xx_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
	SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
			   s3c64xx_spi_runtime_resume, NULL)
};

static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
	.high_speed	= true,
};

static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f, 0x7F },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
};

static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
};

static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};

static struct s3c64xx_spi_port_config exynos5440_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.quirks		= S3C64XX_SPI_QUIRK_POLL,
};
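
/*
 * The tables below bind legacy platform device names and DT compatible
 * strings to the per-SoC port configurations defined above; the match is
 * resolved by s3c64xx_spi_get_port_config() at probe time.
 */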
static struct platform_device_id s3c64xx_spi_driver_ids[] = {
	{
		.name		= "s3c2443-spi",
		.driver_data	= (kernel_ulong_t)&s3c2443_spi_port_config,
	}, {
		.name		= "s3c6410-spi",
		.driver_data	= (kernel_ulong_t)&s3c6410_spi_port_config,
	}, {
		.name		= "s5pv210-spi",
		.driver_data	= (kernel_ulong_t)&s5pv210_spi_port_config,
	}, {
		.name		= "exynos4210-spi",
		.driver_data	= (kernel_ulong_t)&exynos4_spi_port_config,
	},
	{ },
};

static const struct of_device_id s3c64xx_spi_dt_match[] = {
	{ .compatible = "samsung,s3c2443-spi",
			.data = (void *)&s3c2443_spi_port_config,
	},
	{ .compatible = "samsung,s3c6410-spi",
			.data = (void *)&s3c6410_spi_port_config,
	},
	{ .compatible = "samsung,s5pv210-spi",
			.data = (void *)&s5pv210_spi_port_config,
	},
	{ .compatible = "samsung,exynos4210-spi",
			.data = (void *)&exynos4_spi_port_config,
	},
	{ .compatible = "samsung,exynos5440-spi",
			.data = (void *)&exynos5440_spi_port_config,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);

static struct platform_driver s3c64xx_spi_driver = {
	.driver = {
		.name	= "s3c64xx-spi",
		.owner = THIS_MODULE,
		.pm = &s3c64xx_spi_pm,
		.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
	},
	.probe = s3c64xx_spi_probe,
	.remove = s3c64xx_spi_remove,
	.id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");

module_platform_driver(s3c64xx_spi_driver);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
MODULE_LICENSE("GPL");