/*
 * Copyright (C) 2009 Samsung Electronics Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

#include <linux/platform_data/spi-s3c64xx.h>

#define MAX_SPI_PORTS		6
#define S3C64XX_SPI_QUIRK_POLL		(1 << 0)
#define S3C64XX_SPI_QUIRK_CS_AUTO	(1 << 1)

/* Registers and bit-fields */

#define S3C64XX_SPI_CH_CFG		0x00
#define S3C64XX_SPI_CLK_CFG		0x04
#define S3C64XX_SPI_MODE_CFG		0x08
#define S3C64XX_SPI_SLAVE_SEL		0x0C
#define S3C64XX_SPI_INT_EN		0x10
#define S3C64XX_SPI_STATUS		0x14
#define S3C64XX_SPI_TX_DATA		0x18
#define S3C64XX_SPI_RX_DATA		0x1C
#define S3C64XX_SPI_PACKET_CNT		0x20
#define S3C64XX_SPI_PENDING_CLR		0x24
#define S3C64XX_SPI_SWAP_CFG		0x28
#define S3C64XX_SPI_FB_CLK		0x2C

/* SPI_CH_CFG bits */
#define S3C64XX_SPI_CH_HS_EN		(1<<6)	/* High Speed Enable */
#define S3C64XX_SPI_CH_SW_RST		(1<<5)
#define S3C64XX_SPI_CH_SLAVE		(1<<4)
#define S3C64XX_SPI_CPOL_L		(1<<3)
#define S3C64XX_SPI_CPHA_B		(1<<2)
#define S3C64XX_SPI_CH_RXCH_ON		(1<<1)
#define S3C64XX_SPI_CH_TXCH_ON		(1<<0)

/* SPI_CLK_CFG bits */
#define S3C64XX_SPI_CLKSEL_SRCMSK	(3<<9)
#define S3C64XX_SPI_CLKSEL_SRCSHFT	9
#define S3C64XX_SPI_ENCLK_ENABLE	(1<<8)
#define S3C64XX_SPI_PSR_MASK		0xff

/* SPI_MODE_CFG bits */
#define S3C64XX_SPI_MODE_CH_TSZ_BYTE		(0<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD	(1<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_WORD		(2<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_MASK		(3<<29)
#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE		(0<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD	(1<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_WORD		(2<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_MASK		(3<<17)
#define S3C64XX_SPI_MODE_RXDMA_ON		(1<<2)
#define S3C64XX_SPI_MODE_TXDMA_ON		(1<<1)
#define S3C64XX_SPI_MODE_4BURST			(1<<0)

/* SPI_SLAVE_SEL bits */
#define S3C64XX_SPI_SLAVE_AUTO			(1<<1)
#define S3C64XX_SPI_SLAVE_SIG_INACT		(1<<0)
#define S3C64XX_SPI_SLAVE_NSC_CNT_2		(2<<4)

/* SPI_INT_EN bits */
#define S3C64XX_SPI_INT_TRAILING_EN		(1<<6)
#define S3C64XX_SPI_INT_RX_OVERRUN_EN		(1<<5)
#define S3C64XX_SPI_INT_RX_UNDERRUN_EN		(1<<4)
#define S3C64XX_SPI_INT_TX_OVERRUN_EN		(1<<3)
#define S3C64XX_SPI_INT_TX_UNDERRUN_EN		(1<<2)
#define S3C64XX_SPI_INT_RX_FIFORDY_EN		(1<<1)
#define S3C64XX_SPI_INT_TX_FIFORDY_EN		(1<<0)

/* SPI_STATUS bits */
#define S3C64XX_SPI_ST_RX_OVERRUN_ERR		(1<<5)
#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR		(1<<4)
#define S3C64XX_SPI_ST_TX_OVERRUN_ERR		(1<<3)
#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR		(1<<2)
#define S3C64XX_SPI_ST_RX_FIFORDY		(1<<1)
#define S3C64XX_SPI_ST_TX_FIFORDY		(1<<0)

#define S3C64XX_SPI_PACKET_CNT_EN		(1<<16)

/* SPI_PENDING_CLR bits */
#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR		(1<<4)
#define S3C64XX_SPI_PND_TX_OVERRUN_CLR		(1<<3)
#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR		(1<<2)
#define S3C64XX_SPI_PND_RX_OVERRUN_CLR		(1<<1)
#define S3C64XX_SPI_PND_TRAILING_CLR		(1<<0)

/* SPI_SWAP_CFG bits */
#define S3C64XX_SPI_SWAP_RX_HALF_WORD		(1<<7)
#define S3C64XX_SPI_SWAP_RX_BYTE		(1<<6)
#define S3C64XX_SPI_SWAP_RX_BIT			(1<<5)
#define S3C64XX_SPI_SWAP_RX_EN			(1<<4)
#define S3C64XX_SPI_SWAP_TX_HALF_WORD		(1<<3)
#define S3C64XX_SPI_SWAP_TX_BYTE		(1<<2)
#define S3C64XX_SPI_SWAP_TX_BIT			(1<<1)
#define S3C64XX_SPI_SWAP_TX_EN			(1<<0)

#define S3C64XX_SPI_FBCLK_MSK		(3<<0)

/* Helpers to extract the FIFO fill levels for this port from SPI_STATUS */
#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
				(1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
					FIFO_LVL_MASK(i))

#define S3C64XX_SPI_MAX_TRAILCNT	0x3ff
#define S3C64XX_SPI_TRAILCNT_OFF	19

#define S3C64XX_SPI_TRAILCNT		S3C64XX_SPI_MAX_TRAILCNT

/* Rough busy-loop count for 't' milliseconds of polling */
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
#define is_polling(x)	(x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)

/* Flags in s3c64xx_spi_driver_data.state */
#define RXBUSY    (1<<2)
#define TXBUSY    (1<<3)

/* Per-direction DMA channel bookkeeping */
struct s3c64xx_spi_dma_data {
	struct dma_chan *ch;
	enum dma_transfer_direction direction;
	unsigned int dmach;	/* request line number, passed to the filter fn */
};

/**
 * struct s3c64xx_spi_port_config - SPI Controller hardware info
 * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
 * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
 * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
 * @quirks: S3C64XX_SPI_QUIRK_* flags for this variant.
 * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
 * @clk_from_cmu: True, if the controller does not include a clock mux and
 *	prescaler unit.
 *
 * The Samsung s3c64xx SPI controller are used on various Samsung SoC's but
 * differ in some aspects such as the size of the fifo and spi bus clock
 * setup. Such differences are specified to the driver using this structure
 * which is provided as driver data to the driver.
 */
struct s3c64xx_spi_port_config {
	int	fifo_lvl_mask[MAX_SPI_PORTS];
	int	rx_lvl_offset;
	int	tx_st_done;
	int	quirks;
	bool	high_speed;
	bool	clk_from_cmu;
};

/**
 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
 * @regs: Pointer to ioremap'ed controller registers.
 * @clk: Pointer to the spi clock.
 * @src_clk: Pointer to the clock used to generate SPI signals.
 * @pdev: Platform device this driver is bound to.
 * @master: Pointer to the SPI Protocol master.
 * @cntrlr_info: Platform specific data for the controller this driver manages.
 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
 * @lock: Controller specific lock.
 * @sfr_start: BUS address of SPI controller regs.
 * @xfer_completion: To indicate completion of xfer task.
 * @state: Set of FLAGS (RXBUSY/TXBUSY) to indicate status.
 * @cur_mode: Stores the active configuration of the controller.
 * @cur_bpw: Stores the active bits per word settings.
 * @rx_dma: Runtime info for the Rx DMA channel.
 * @tx_dma: Runtime info for the Tx DMA channel.
 * @port_conf: Per-SoC hardware configuration for this port.
 * @port_id: Index of this port in @port_conf->fifo_lvl_mask.
 * @cur_speed: Stores the active xfer clock speed.
183 */ 184 struct s3c64xx_spi_driver_data { 185 void __iomem *regs; 186 struct clk *clk; 187 struct clk *src_clk; 188 struct platform_device *pdev; 189 struct spi_master *master; 190 struct s3c64xx_spi_info *cntrlr_info; 191 struct spi_device *tgl_spi; 192 spinlock_t lock; 193 unsigned long sfr_start; 194 struct completion xfer_completion; 195 unsigned state; 196 unsigned cur_mode, cur_bpw; 197 unsigned cur_speed; 198 struct s3c64xx_spi_dma_data rx_dma; 199 struct s3c64xx_spi_dma_data tx_dma; 200 struct s3c64xx_spi_port_config *port_conf; 201 unsigned int port_id; 202 }; 203 204 static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) 205 { 206 void __iomem *regs = sdd->regs; 207 unsigned long loops; 208 u32 val; 209 210 writel(0, regs + S3C64XX_SPI_PACKET_CNT); 211 212 val = readl(regs + S3C64XX_SPI_CH_CFG); 213 val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON); 214 writel(val, regs + S3C64XX_SPI_CH_CFG); 215 216 val = readl(regs + S3C64XX_SPI_CH_CFG); 217 val |= S3C64XX_SPI_CH_SW_RST; 218 val &= ~S3C64XX_SPI_CH_HS_EN; 219 writel(val, regs + S3C64XX_SPI_CH_CFG); 220 221 /* Flush TxFIFO*/ 222 loops = msecs_to_loops(1); 223 do { 224 val = readl(regs + S3C64XX_SPI_STATUS); 225 } while (TX_FIFO_LVL(val, sdd) && loops--); 226 227 if (loops == 0) 228 dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n"); 229 230 /* Flush RxFIFO*/ 231 loops = msecs_to_loops(1); 232 do { 233 val = readl(regs + S3C64XX_SPI_STATUS); 234 if (RX_FIFO_LVL(val, sdd)) 235 readl(regs + S3C64XX_SPI_RX_DATA); 236 else 237 break; 238 } while (loops--); 239 240 if (loops == 0) 241 dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n"); 242 243 val = readl(regs + S3C64XX_SPI_CH_CFG); 244 val &= ~S3C64XX_SPI_CH_SW_RST; 245 writel(val, regs + S3C64XX_SPI_CH_CFG); 246 247 val = readl(regs + S3C64XX_SPI_MODE_CFG); 248 val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON); 249 writel(val, regs + S3C64XX_SPI_MODE_CFG); 250 } 251 252 static void s3c64xx_spi_dmacb(void *data) 253 { 
254 struct s3c64xx_spi_driver_data *sdd; 255 struct s3c64xx_spi_dma_data *dma = data; 256 unsigned long flags; 257 258 if (dma->direction == DMA_DEV_TO_MEM) 259 sdd = container_of(data, 260 struct s3c64xx_spi_driver_data, rx_dma); 261 else 262 sdd = container_of(data, 263 struct s3c64xx_spi_driver_data, tx_dma); 264 265 spin_lock_irqsave(&sdd->lock, flags); 266 267 if (dma->direction == DMA_DEV_TO_MEM) { 268 sdd->state &= ~RXBUSY; 269 if (!(sdd->state & TXBUSY)) 270 complete(&sdd->xfer_completion); 271 } else { 272 sdd->state &= ~TXBUSY; 273 if (!(sdd->state & RXBUSY)) 274 complete(&sdd->xfer_completion); 275 } 276 277 spin_unlock_irqrestore(&sdd->lock, flags); 278 } 279 280 static void prepare_dma(struct s3c64xx_spi_dma_data *dma, 281 struct sg_table *sgt) 282 { 283 struct s3c64xx_spi_driver_data *sdd; 284 struct dma_slave_config config; 285 struct dma_async_tx_descriptor *desc; 286 287 memset(&config, 0, sizeof(config)); 288 289 if (dma->direction == DMA_DEV_TO_MEM) { 290 sdd = container_of((void *)dma, 291 struct s3c64xx_spi_driver_data, rx_dma); 292 config.direction = dma->direction; 293 config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA; 294 config.src_addr_width = sdd->cur_bpw / 8; 295 config.src_maxburst = 1; 296 dmaengine_slave_config(dma->ch, &config); 297 } else { 298 sdd = container_of((void *)dma, 299 struct s3c64xx_spi_driver_data, tx_dma); 300 config.direction = dma->direction; 301 config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA; 302 config.dst_addr_width = sdd->cur_bpw / 8; 303 config.dst_maxburst = 1; 304 dmaengine_slave_config(dma->ch, &config); 305 } 306 307 desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents, 308 dma->direction, DMA_PREP_INTERRUPT); 309 310 desc->callback = s3c64xx_spi_dmacb; 311 desc->callback_param = dma; 312 313 dmaengine_submit(desc); 314 dma_async_issue_pending(dma->ch); 315 } 316 317 static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) 318 { 319 struct s3c64xx_spi_driver_data *sdd = 
spi_master_get_devdata(spi); 320 dma_filter_fn filter = sdd->cntrlr_info->filter; 321 struct device *dev = &sdd->pdev->dev; 322 dma_cap_mask_t mask; 323 int ret; 324 325 if (!is_polling(sdd)) { 326 dma_cap_zero(mask); 327 dma_cap_set(DMA_SLAVE, mask); 328 329 /* Acquire DMA channels */ 330 sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter, 331 (void *)sdd->rx_dma.dmach, dev, "rx"); 332 if (!sdd->rx_dma.ch) { 333 dev_err(dev, "Failed to get RX DMA channel\n"); 334 ret = -EBUSY; 335 goto out; 336 } 337 spi->dma_rx = sdd->rx_dma.ch; 338 339 sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter, 340 (void *)sdd->tx_dma.dmach, dev, "tx"); 341 if (!sdd->tx_dma.ch) { 342 dev_err(dev, "Failed to get TX DMA channel\n"); 343 ret = -EBUSY; 344 goto out_rx; 345 } 346 spi->dma_tx = sdd->tx_dma.ch; 347 } 348 349 return 0; 350 351 out_rx: 352 dma_release_channel(sdd->rx_dma.ch); 353 out: 354 return ret; 355 } 356 357 static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi) 358 { 359 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi); 360 361 /* Free DMA channels */ 362 if (!is_polling(sdd)) { 363 dma_release_channel(sdd->rx_dma.ch); 364 dma_release_channel(sdd->tx_dma.ch); 365 } 366 367 return 0; 368 } 369 370 static bool s3c64xx_spi_can_dma(struct spi_master *master, 371 struct spi_device *spi, 372 struct spi_transfer *xfer) 373 { 374 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 375 376 return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1; 377 } 378 379 static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, 380 struct spi_device *spi, 381 struct spi_transfer *xfer, int dma_mode) 382 { 383 void __iomem *regs = sdd->regs; 384 u32 modecfg, chcfg; 385 386 modecfg = readl(regs + S3C64XX_SPI_MODE_CFG); 387 modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON); 388 389 chcfg = readl(regs + S3C64XX_SPI_CH_CFG); 390 chcfg &= ~S3C64XX_SPI_CH_TXCH_ON; 391 392 if (dma_mode) { 393 chcfg &= 
~S3C64XX_SPI_CH_RXCH_ON; 394 } else { 395 /* Always shift in data in FIFO, even if xfer is Tx only, 396 * this helps setting PCKT_CNT value for generating clocks 397 * as exactly needed. 398 */ 399 chcfg |= S3C64XX_SPI_CH_RXCH_ON; 400 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) 401 | S3C64XX_SPI_PACKET_CNT_EN, 402 regs + S3C64XX_SPI_PACKET_CNT); 403 } 404 405 if (xfer->tx_buf != NULL) { 406 sdd->state |= TXBUSY; 407 chcfg |= S3C64XX_SPI_CH_TXCH_ON; 408 if (dma_mode) { 409 modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; 410 prepare_dma(&sdd->tx_dma, &xfer->tx_sg); 411 } else { 412 switch (sdd->cur_bpw) { 413 case 32: 414 iowrite32_rep(regs + S3C64XX_SPI_TX_DATA, 415 xfer->tx_buf, xfer->len / 4); 416 break; 417 case 16: 418 iowrite16_rep(regs + S3C64XX_SPI_TX_DATA, 419 xfer->tx_buf, xfer->len / 2); 420 break; 421 default: 422 iowrite8_rep(regs + S3C64XX_SPI_TX_DATA, 423 xfer->tx_buf, xfer->len); 424 break; 425 } 426 } 427 } 428 429 if (xfer->rx_buf != NULL) { 430 sdd->state |= RXBUSY; 431 432 if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL 433 && !(sdd->cur_mode & SPI_CPHA)) 434 chcfg |= S3C64XX_SPI_CH_HS_EN; 435 436 if (dma_mode) { 437 modecfg |= S3C64XX_SPI_MODE_RXDMA_ON; 438 chcfg |= S3C64XX_SPI_CH_RXCH_ON; 439 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) 440 | S3C64XX_SPI_PACKET_CNT_EN, 441 regs + S3C64XX_SPI_PACKET_CNT); 442 prepare_dma(&sdd->rx_dma, &xfer->rx_sg); 443 } 444 } 445 446 writel(modecfg, regs + S3C64XX_SPI_MODE_CFG); 447 writel(chcfg, regs + S3C64XX_SPI_CH_CFG); 448 } 449 450 static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd, 451 int timeout_ms) 452 { 453 void __iomem *regs = sdd->regs; 454 unsigned long val = 1; 455 u32 status; 456 457 /* max fifo depth available */ 458 u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1; 459 460 if (timeout_ms) 461 val = msecs_to_loops(timeout_ms); 462 463 do { 464 status = readl(regs + S3C64XX_SPI_STATUS); 465 } while (RX_FIFO_LVL(status, sdd) < max_fifo && --val); 466 467 /* 
 * return the actual received data length */
	return RX_FIFO_LVL(status, sdd);
}

/*
 * Wait for a DMA transfer to complete. For Tx-only transfers the DMA
 * callback fires as soon as the FIFO is filled, so we additionally poll
 * SPI_STATUS until the FIFO drains and TX_DONE is set.
 *
 * NOTE(review): "xfer->len * 8 * 1000" is computed in 32-bit and can
 * overflow for transfers of roughly 512KiB and above — consider 64-bit
 * math (div_u64). TODO confirm maximum transfer sizes seen in practice.
 */
static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
			struct spi_transfer *xfer)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	u32 status;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 10; /* some tolerance */

	val = msecs_to_jiffies(ms) + 10;
	val = wait_for_completion_timeout(&sdd->xfer_completion, val);

	/*
	 * If the previous xfer was completed within timeout, then
	 * proceed further else return -EIO.
	 * DmaTx returns after simply writing data in the FIFO,
	 * w/o waiting for real transmission on the bus to finish.
	 * DmaRx returns only after Dma read data from FIFO which
	 * needs bus transmission to finish, so we don't worry if
	 * Xfer involved Rx(with or without Tx).
	 */
	if (val && !xfer->rx_buf) {
		val = msecs_to_loops(10);
		status = readl(regs + S3C64XX_SPI_STATUS);
		while ((TX_FIFO_LVL(status, sdd)
			|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
		       && --val) {
			cpu_relax();
			status = readl(regs + S3C64XX_SPI_STATUS);
		}

	}

	/* If timed out while checking rx/tx status return error */
	if (!val)
		return -EIO;

	return 0;
}

/*
 * Run one transfer entirely by polled FIFO access (no DMA). Data is read
 * back in FIFO-sized chunks; for lengths up to the FIFO size the chunk
 * loop below runs exactly once.
 *
 * NOTE(review): same potential 32-bit overflow of the 'ms' computation as
 * in wait_for_dma(). Also, a Tx-only transfer returns 0 even if the
 * initial polling loop ran out of budget — presumably relied upon for
 * Rx transfers larger than the FIFO (the level never reaches xfer->len),
 * but it silently drops the timeout for the Tx-only case; verify.
 */
static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
			struct spi_transfer *xfer)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	u32 status;
	int loops;
	u32 cpy_len;
	u8 *buf;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 10; /* some tolerance */

	val = msecs_to_loops(ms);
	do {
		status = readl(regs + S3C64XX_SPI_STATUS);
	} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);


	/* If it was only Tx */
	if (!xfer->rx_buf) {
		sdd->state &= ~TXBUSY;
		return 0;
	}

	/*
	 * If the receive length is bigger than the controller fifo
	 * size, calculate the loops and read the fifo as many times.
	 * loops = length / max fifo size (calculated by using the
	 * fifo mask).
	 * For any size less than the fifo size the below code is
	 * executed atleast once.
	 */
	loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
	buf = xfer->rx_buf;
	do {
		/* wait for data to be received in the fifo */
		cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
						       (loops ? ms : 0));

		switch (sdd->cur_bpw) {
		case 32:
			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
				     buf, cpy_len / 4);
			break;
		case 16:
			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
				     buf, cpy_len / 2);
			break;
		default:
			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
				    buf, cpy_len);
			break;
		}

		buf = buf + cpy_len;
	} while (loops--);
	sdd->state &= ~RXBUSY;

	return 0;
}

/*
 * Push the cached cur_mode/cur_bpw/cur_speed settings into the hardware:
 * clock gating, CPOL/CPHA, channel/bus transfer sizes and the prescaler.
 * NOTE(review): clk_set_rate()/clk_prepare_enable() return values are
 * ignored on the clk_from_cmu path — a failed rate change goes unnoticed.
 */
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	u32 val;

	/* Disable Clock */
	if (sdd->port_conf->clk_from_cmu) {
		clk_disable_unprepare(sdd->src_clk);
	} else {
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	/* Set Polarity and Phase */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_SLAVE |
			S3C64XX_SPI_CPOL_L |
			S3C64XX_SPI_CPHA_B);

	if (sdd->cur_mode & SPI_CPOL)
		val |= S3C64XX_SPI_CPOL_L;

	if (sdd->cur_mode & SPI_CPHA)
		val |= S3C64XX_SPI_CPHA_B;

	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Set Channel & DMA Mode */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
			| S3C64XX_SPI_MODE_CH_TSZ_MASK);

	switch (sdd->cur_bpw) {
	case 32:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
		break;
	case 16:
		val |=
S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
		break;
	default:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
		val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
		break;
	}

	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	if (sdd->port_conf->clk_from_cmu) {
		/* Configure Clock */
		/* There is half-multiplier before the SPI */
		clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
		/* Enable Clock */
		clk_prepare_enable(sdd->src_clk);
	} else {
		/* Configure Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_PSR_MASK;
		val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
				& S3C64XX_SPI_PSR_MASK);
		writel(val, regs + S3C64XX_SPI_CLK_CFG);

		/* Enable Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val |= S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}
}

#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)

/*
 * spi_master->prepare_message() callback: reconfigure the controller if
 * the device's speed/mode/bpw differ from the cached state, and program
 * the per-device feedback clock delay.
 */
static int s3c64xx_spi_prepare_message(struct spi_master *master,
				       struct spi_message *msg)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;

	/* If Master's(controller) state differs from that needed by Slave */
	if (sdd->cur_speed != spi->max_speed_hz
			|| sdd->cur_mode != spi->mode
			|| sdd->cur_bpw != spi->bits_per_word) {
		sdd->cur_bpw = spi->bits_per_word;
		sdd->cur_speed = spi->max_speed_hz;
		sdd->cur_mode = spi->mode;
		s3c64xx_spi_config(sdd);
	}

	/* Configure feedback delay */
	writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);

	return 0;
}

/*
 * spi_master->transfer_one() callback: run a single spi_transfer, by DMA
 * when both channels are available and the length exceeds the FIFO,
 * otherwise by polled FIFO access.
 */
static int s3c64xx_spi_transfer_one(struct spi_master *master,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	int status;
	u32 speed;
	u8 bpw;
	unsigned long flags;
	int use_dma;

	reinit_completion(&sdd->xfer_completion);

	/* Only BPW and Speed may change across transfers */
	bpw = xfer->bits_per_word;
	speed = xfer->speed_hz ? : spi->max_speed_hz;

	if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
		sdd->cur_bpw = bpw;
		sdd->cur_speed = speed;
		s3c64xx_spi_config(sdd);
	}

	/* Polling method for xfers not bigger than FIFO capacity */
	use_dma = 0;
	if (!is_polling(sdd) &&
	    (sdd->rx_dma.ch && sdd->tx_dma.ch &&
	     (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
		use_dma = 1;

	spin_lock_irqsave(&sdd->lock, flags);

	/* Pending only which is to be done */
	sdd->state &= ~RXBUSY;
	sdd->state &= ~TXBUSY;

	enable_datapath(sdd, spi, xfer, use_dma);

	/* Start the signals */
	if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
		writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
	else
		writel(readl(sdd->regs + S3C64XX_SPI_SLAVE_SEL)
			| S3C64XX_SPI_SLAVE_AUTO | S3C64XX_SPI_SLAVE_NSC_CNT_2,
			sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	spin_unlock_irqrestore(&sdd->lock, flags);

	if (use_dma)
		status = wait_for_dma(sdd, xfer);
	else
		status = wait_for_pio(sdd, xfer);

	if (status) {
		/* 'f' = direction failed (still busy), 'p' = passed */
		dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
			xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
			(sdd->state & RXBUSY) ? 'f' : 'p',
			(sdd->state & TXBUSY) ?
'f' : 'p',
			xfer->len);

		if (use_dma) {
			/* Cancel whichever DMA direction never completed */
			if (xfer->tx_buf != NULL
			    && (sdd->state & TXBUSY))
				dmaengine_terminate_all(sdd->tx_dma.ch);
			if (xfer->rx_buf != NULL
			    && (sdd->state & RXBUSY))
				dmaengine_terminate_all(sdd->rx_dma.ch);
		}
	} else {
		flush_fifo(sdd);
	}

	return status;
}

/*
 * Build a s3c64xx_spi_csinfo from the slave's "controller-data" DT child
 * node. Returns an ERR_PTR on missing nodes or allocation failure; the
 * caller owns (and must kfree) the returned structure.
 */
static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
				struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs;
	struct device_node *slave_np, *data_np = NULL;
	u32 fb_delay = 0;

	slave_np = spi->dev.of_node;
	if (!slave_np) {
		dev_err(&spi->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	data_np = of_get_child_by_name(slave_np, "controller-data");
	if (!data_np) {
		dev_err(&spi->dev, "child node 'controller-data' not found\n");
		return ERR_PTR(-EINVAL);
	}

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs) {
		of_node_put(data_np);
		return ERR_PTR(-ENOMEM);
	}

	/* Optional property; fb_delay stays 0 when absent */
	of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
	cs->fb_delay = fb_delay;
	of_node_put(data_np);
	return cs;
}

/*
 * Here we only check the validity of requested configuration
 * and save the configuration in a local data-structure.
 * The controller is actually configured only just before we
 * get a message to transfer.
 */
static int s3c64xx_spi_setup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_info *sci;
	int err;

	sdd = spi_master_get_devdata(spi->master);
	if (spi->dev.of_node) {
		cs = s3c64xx_get_slave_ctrldata(spi);
		spi->controller_data = cs;
	} else if (cs) {
		/* On non-DT platforms the SPI core will set spi->cs_gpio
		 * to -ENOENT. The GPIO pin used to drive the chip select
		 * is defined by using platform data so spi->cs_gpio value
		 * has to be override to have the proper GPIO pin number.
		 */
		spi->cs_gpio = cs->line;
	}

	if (IS_ERR_OR_NULL(cs)) {
		dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
		return -ENODEV;
	}

	/* Claim the /CS GPIO only on first setup for this device */
	if (!spi_get_ctldata(spi)) {
		if (gpio_is_valid(spi->cs_gpio)) {
			err = gpio_request_one(spi->cs_gpio, GPIOF_OUT_INIT_HIGH,
					       dev_name(&spi->dev));
			if (err) {
				dev_err(&spi->dev,
					"Failed to get /CS gpio [%d]: %d\n",
					spi->cs_gpio, err);
				goto err_gpio_req;
			}
		}

		spi_set_ctldata(spi, cs);
	}

	sci = sdd->cntrlr_info;

	pm_runtime_get_sync(&sdd->pdev->dev);

	/* Check if we can provide the requested rate */
	if (!sdd->port_conf->clk_from_cmu) {
		u32 psr, speed;

		/* Max possible */
		speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);

		if (spi->max_speed_hz > speed)
			spi->max_speed_hz = speed;

		psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
		psr &= S3C64XX_SPI_PSR_MASK;
		if (psr == S3C64XX_SPI_PSR_MASK)
			psr--;

		/* Round the prescaler up so we never exceed the request */
		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz < speed) {
			if (psr+1 < S3C64XX_SPI_PSR_MASK) {
				psr++;
			} else {
				err = -EINVAL;
				goto setup_exit;
			}
		}

		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz >= speed) {
			spi->max_speed_hz = speed;
		} else {
			dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
				spi->max_speed_hz);
			err = -EINVAL;
			goto setup_exit;
		}
	}

	pm_runtime_put(&sdd->pdev->dev);
	if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
		writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
	return 0;

setup_exit:
	pm_runtime_put(&sdd->pdev->dev);
	/* setup() returns with device de-selected */
	if (!(sdd->port_conf->quirks &
S3C64XX_SPI_QUIRK_CS_AUTO))
		writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	if (gpio_is_valid(spi->cs_gpio))
		gpio_free(spi->cs_gpio);
	spi_set_ctldata(spi, NULL);

err_gpio_req:
	/* DT-allocated csinfo is owned by us; platform data is not */
	if (spi->dev.of_node)
		kfree(cs);

	return err;
}

/*
 * Undo s3c64xx_spi_setup(): release the /CS GPIO and, on DT platforms,
 * free the csinfo allocated by s3c64xx_get_slave_ctrldata().
 */
static void s3c64xx_spi_cleanup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);

	if (gpio_is_valid(spi->cs_gpio)) {
		gpio_free(spi->cs_gpio);
		if (spi->dev.of_node)
			kfree(cs);
		else {
			/* On non-DT platforms, the SPI core sets
			 * spi->cs_gpio to -ENOENT and .setup()
			 * overrides it with the GPIO pin value
			 * passed using platform data.
			 */
			spi->cs_gpio = -ENOENT;
		}
	}

	spi_set_ctldata(spi, NULL);
}

/*
 * Error interrupt handler: report FIFO over/underruns and acknowledge
 * them (PENDING_CLR bits must be set and then cleared again).
 */
static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
{
	struct s3c64xx_spi_driver_data *sdd = data;
	struct spi_master *spi = sdd->master;
	unsigned int val, clr = 0;

	val = readl(sdd->regs + S3C64XX_SPI_STATUS);

	if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
		clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
		dev_err(&spi->dev, "RX overrun\n");
	}
	if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
		dev_err(&spi->dev, "RX underrun\n");
	}
	if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
		dev_err(&spi->dev, "TX overrun\n");
	}
	if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
		dev_err(&spi->dev, "TX underrun\n");
	}

	/* Clear the pending irq by setting and then clearing it */
	writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);

	return IRQ_HANDLED;
}

/*
 * Bring the controller to a known state: CS inactive, interrupts off,
 * pending flags cleared, swap/4-burst disabled, trailing count set.
 * NOTE(review): the 'channel' parameter is unused here — confirm whether
 * it is kept only for the probe-time call signature.
 */
static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
{
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	void __iomem *regs = sdd->regs;
	unsigned int val;

	sdd->cur_speed = 0;

	if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
		writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	/* Disable Interrupts - we use Polling if not DMA mode */
	writel(0, regs + S3C64XX_SPI_INT_EN);

	if (!sdd->port_conf->clk_from_cmu)
		writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
				regs + S3C64XX_SPI_CLK_CFG);
	writel(0, regs + S3C64XX_SPI_MODE_CFG);
	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	/* Clear any irq pending bits, should set and clear the bits */
	val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
		S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
		S3C64XX_SPI_PND_TX_OVERRUN_CLR |
		S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
	writel(val, regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, regs + S3C64XX_SPI_PENDING_CLR);

	writel(0, regs + S3C64XX_SPI_SWAP_CFG);

	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~S3C64XX_SPI_MODE_4BURST;
	val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	flush_fifo(sdd);
}

#ifdef CONFIG_OF
/*
 * Build controller platform info from the DT node; missing optional
 * properties fall back to defaults (clock index 0, one chip select).
 */
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
	struct s3c64xx_spi_info *sci;
	u32 temp;

	sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return ERR_PTR(-ENOMEM);

	if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
		dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
		sci->src_clk_nr = 0;
	} else {
		sci->src_clk_nr = temp;
	}

	if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
		dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
		sci->num_cs = 1;
	} else {
		sci->num_cs = temp;
	}

	return sci;
}
#else
/* Non-OF build: platform data must be supplied by the board file */
static struct s3c64xx_spi_info
*s3c64xx_spi_parse_dt(struct device *dev) 1008 { 1009 return dev_get_platdata(dev); 1010 } 1011 #endif 1012 1013 static const struct of_device_id s3c64xx_spi_dt_match[]; 1014 1015 static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config( 1016 struct platform_device *pdev) 1017 { 1018 #ifdef CONFIG_OF 1019 if (pdev->dev.of_node) { 1020 const struct of_device_id *match; 1021 match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node); 1022 return (struct s3c64xx_spi_port_config *)match->data; 1023 } 1024 #endif 1025 return (struct s3c64xx_spi_port_config *) 1026 platform_get_device_id(pdev)->driver_data; 1027 } 1028 1029 static int s3c64xx_spi_probe(struct platform_device *pdev) 1030 { 1031 struct resource *mem_res; 1032 struct resource *res; 1033 struct s3c64xx_spi_driver_data *sdd; 1034 struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev); 1035 struct spi_master *master; 1036 int ret, irq; 1037 char clk_name[16]; 1038 1039 if (!sci && pdev->dev.of_node) { 1040 sci = s3c64xx_spi_parse_dt(&pdev->dev); 1041 if (IS_ERR(sci)) 1042 return PTR_ERR(sci); 1043 } 1044 1045 if (!sci) { 1046 dev_err(&pdev->dev, "platform_data missing!\n"); 1047 return -ENODEV; 1048 } 1049 1050 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1051 if (mem_res == NULL) { 1052 dev_err(&pdev->dev, "Unable to get SPI MEM resource\n"); 1053 return -ENXIO; 1054 } 1055 1056 irq = platform_get_irq(pdev, 0); 1057 if (irq < 0) { 1058 dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq); 1059 return irq; 1060 } 1061 1062 master = spi_alloc_master(&pdev->dev, 1063 sizeof(struct s3c64xx_spi_driver_data)); 1064 if (master == NULL) { 1065 dev_err(&pdev->dev, "Unable to allocate SPI Master\n"); 1066 return -ENOMEM; 1067 } 1068 1069 platform_set_drvdata(pdev, master); 1070 1071 sdd = spi_master_get_devdata(master); 1072 sdd->port_conf = s3c64xx_spi_get_port_config(pdev); 1073 sdd->master = master; 1074 sdd->cntrlr_info = sci; 1075 sdd->pdev = pdev; 1076 sdd->sfr_start = 
mem_res->start; 1077 if (pdev->dev.of_node) { 1078 ret = of_alias_get_id(pdev->dev.of_node, "spi"); 1079 if (ret < 0) { 1080 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", 1081 ret); 1082 goto err0; 1083 } 1084 sdd->port_id = ret; 1085 } else { 1086 sdd->port_id = pdev->id; 1087 } 1088 1089 sdd->cur_bpw = 8; 1090 1091 if (!sdd->pdev->dev.of_node) { 1092 res = platform_get_resource(pdev, IORESOURCE_DMA, 0); 1093 if (!res) { 1094 dev_warn(&pdev->dev, "Unable to get SPI tx dma resource. Switching to poll mode\n"); 1095 sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL; 1096 } else 1097 sdd->tx_dma.dmach = res->start; 1098 1099 res = platform_get_resource(pdev, IORESOURCE_DMA, 1); 1100 if (!res) { 1101 dev_warn(&pdev->dev, "Unable to get SPI rx dma resource. Switching to poll mode\n"); 1102 sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL; 1103 } else 1104 sdd->rx_dma.dmach = res->start; 1105 } 1106 1107 sdd->tx_dma.direction = DMA_MEM_TO_DEV; 1108 sdd->rx_dma.direction = DMA_DEV_TO_MEM; 1109 1110 master->dev.of_node = pdev->dev.of_node; 1111 master->bus_num = sdd->port_id; 1112 master->setup = s3c64xx_spi_setup; 1113 master->cleanup = s3c64xx_spi_cleanup; 1114 master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer; 1115 master->prepare_message = s3c64xx_spi_prepare_message; 1116 master->transfer_one = s3c64xx_spi_transfer_one; 1117 master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer; 1118 master->num_chipselect = sci->num_cs; 1119 master->dma_alignment = 8; 1120 master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | 1121 SPI_BPW_MASK(8); 1122 /* the spi->mode bits understood by this driver: */ 1123 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 1124 master->auto_runtime_pm = true; 1125 if (!is_polling(sdd)) 1126 master->can_dma = s3c64xx_spi_can_dma; 1127 1128 sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res); 1129 if (IS_ERR(sdd->regs)) { 1130 ret = PTR_ERR(sdd->regs); 1131 goto err0; 1132 } 1133 1134 if 
(sci->cfg_gpio && sci->cfg_gpio()) { 1135 dev_err(&pdev->dev, "Unable to config gpio\n"); 1136 ret = -EBUSY; 1137 goto err0; 1138 } 1139 1140 /* Setup clocks */ 1141 sdd->clk = devm_clk_get(&pdev->dev, "spi"); 1142 if (IS_ERR(sdd->clk)) { 1143 dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n"); 1144 ret = PTR_ERR(sdd->clk); 1145 goto err0; 1146 } 1147 1148 if (clk_prepare_enable(sdd->clk)) { 1149 dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n"); 1150 ret = -EBUSY; 1151 goto err0; 1152 } 1153 1154 sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr); 1155 sdd->src_clk = devm_clk_get(&pdev->dev, clk_name); 1156 if (IS_ERR(sdd->src_clk)) { 1157 dev_err(&pdev->dev, 1158 "Unable to acquire clock '%s'\n", clk_name); 1159 ret = PTR_ERR(sdd->src_clk); 1160 goto err2; 1161 } 1162 1163 if (clk_prepare_enable(sdd->src_clk)) { 1164 dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name); 1165 ret = -EBUSY; 1166 goto err2; 1167 } 1168 1169 /* Setup Deufult Mode */ 1170 s3c64xx_spi_hwinit(sdd, sdd->port_id); 1171 1172 spin_lock_init(&sdd->lock); 1173 init_completion(&sdd->xfer_completion); 1174 1175 ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0, 1176 "spi-s3c64xx", sdd); 1177 if (ret != 0) { 1178 dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n", 1179 irq, ret); 1180 goto err3; 1181 } 1182 1183 writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN | 1184 S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN, 1185 sdd->regs + S3C64XX_SPI_INT_EN); 1186 1187 pm_runtime_set_active(&pdev->dev); 1188 pm_runtime_enable(&pdev->dev); 1189 1190 ret = devm_spi_register_master(&pdev->dev, master); 1191 if (ret != 0) { 1192 dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret); 1193 goto err3; 1194 } 1195 1196 dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n", 1197 sdd->port_id, master->num_chipselect); 1198 dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tDMA=[Rx-%d, Tx-%d]\n", 1199 mem_res, 1200 
sdd->rx_dma.dmach, sdd->tx_dma.dmach); 1201 1202 return 0; 1203 1204 err3: 1205 clk_disable_unprepare(sdd->src_clk); 1206 err2: 1207 clk_disable_unprepare(sdd->clk); 1208 err0: 1209 spi_master_put(master); 1210 1211 return ret; 1212 } 1213 1214 static int s3c64xx_spi_remove(struct platform_device *pdev) 1215 { 1216 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); 1217 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 1218 1219 pm_runtime_disable(&pdev->dev); 1220 1221 writel(0, sdd->regs + S3C64XX_SPI_INT_EN); 1222 1223 clk_disable_unprepare(sdd->src_clk); 1224 1225 clk_disable_unprepare(sdd->clk); 1226 1227 return 0; 1228 } 1229 1230 #ifdef CONFIG_PM_SLEEP 1231 static int s3c64xx_spi_suspend(struct device *dev) 1232 { 1233 struct spi_master *master = dev_get_drvdata(dev); 1234 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 1235 1236 int ret = spi_master_suspend(master); 1237 if (ret) 1238 return ret; 1239 1240 if (!pm_runtime_suspended(dev)) { 1241 clk_disable_unprepare(sdd->clk); 1242 clk_disable_unprepare(sdd->src_clk); 1243 } 1244 1245 sdd->cur_speed = 0; /* Output Clock is stopped */ 1246 1247 return 0; 1248 } 1249 1250 static int s3c64xx_spi_resume(struct device *dev) 1251 { 1252 struct spi_master *master = dev_get_drvdata(dev); 1253 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 1254 struct s3c64xx_spi_info *sci = sdd->cntrlr_info; 1255 1256 if (sci->cfg_gpio) 1257 sci->cfg_gpio(); 1258 1259 if (!pm_runtime_suspended(dev)) { 1260 clk_prepare_enable(sdd->src_clk); 1261 clk_prepare_enable(sdd->clk); 1262 } 1263 1264 s3c64xx_spi_hwinit(sdd, sdd->port_id); 1265 1266 return spi_master_resume(master); 1267 } 1268 #endif /* CONFIG_PM_SLEEP */ 1269 1270 #ifdef CONFIG_PM 1271 static int s3c64xx_spi_runtime_suspend(struct device *dev) 1272 { 1273 struct spi_master *master = dev_get_drvdata(dev); 1274 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 1275 1276 
	clk_disable_unprepare(sdd->clk);
	clk_disable_unprepare(sdd->src_clk);

	return 0;
}

/* Runtime resume: re-enable the bus-source clock then the interface
 * clock, unwinding the first if the second fails.
 */
static int s3c64xx_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(sdd->src_clk);
	if (ret != 0)
		return ret;

	ret = clk_prepare_enable(sdd->clk);
	if (ret != 0) {
		clk_disable_unprepare(sdd->src_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops s3c64xx_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
	SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
			   s3c64xx_spi_runtime_resume, NULL)
};

/* Per-SoC port descriptions. fifo_lvl_mask holds one FIFO-level field
 * mask per port (array index == port id); rx_lvl_offset/tx_st_done are
 * the corresponding STATUS register bit positions.
 * NOTE(review): these cannot be const because probe() may rewrite
 * ->quirks to S3C64XX_SPI_QUIRK_POLL when DMA resources are missing.
 */
static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
	.high_speed	= true,
};

static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f, 0x7F },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
};

static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
};

/* Exynos4 onwards: the SPI clock is controlled from the CMU, so the
 * driver skips the internal prescaler/clock-select registers.
 */
static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};

/* Exynos5440: single port, polled I/O only */
static struct s3c64xx_spi_port_config exynos5440_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.quirks		= S3C64XX_SPI_QUIRK_POLL,
};

/* Exynos7: six ports, hardware-managed chip select */
static struct s3c64xx_spi_port_config exynos7_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff},
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
};

/* Legacy (non-DT) device-name matching; driver_data selects the port
 * configuration above.
 */
static struct platform_device_id s3c64xx_spi_driver_ids[] = {
	{
		.name		= "s3c2443-spi",
		.driver_data	= (kernel_ulong_t)&s3c2443_spi_port_config,
	}, {
		.name		= "s3c6410-spi",
		.driver_data	= (kernel_ulong_t)&s3c6410_spi_port_config,
	}, {
		.name		= "s5pv210-spi",
		.driver_data	= (kernel_ulong_t)&s5pv210_spi_port_config,
	}, {
		.name		= "exynos4210-spi",
		.driver_data	= (kernel_ulong_t)&exynos4_spi_port_config,
	},
	{ },
};

/* Device-tree matching; .data selects the port configuration above */
static const struct of_device_id s3c64xx_spi_dt_match[] = {
	{ .compatible = "samsung,s3c2443-spi",
			.data = (void *)&s3c2443_spi_port_config,
	},
	{ .compatible = "samsung,s3c6410-spi",
			.data = (void *)&s3c6410_spi_port_config,
	},
	{ .compatible = "samsung,s5pv210-spi",
			.data = (void *)&s5pv210_spi_port_config,
	},
	{ .compatible = "samsung,exynos4210-spi",
			.data = (void *)&exynos4_spi_port_config,
	},
	{ .compatible = "samsung,exynos5440-spi",
			.data = (void *)&exynos5440_spi_port_config,
	},
	{ .compatible = "samsung,exynos7-spi",
			.data = (void *)&exynos7_spi_port_config,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);

static struct platform_driver s3c64xx_spi_driver = {
	.driver = {
		.name	= "s3c64xx-spi",
		.pm = &s3c64xx_spi_pm,
		.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
	},
	.probe = s3c64xx_spi_probe,
	.remove = s3c64xx_spi_remove,
	.id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");

module_platform_driver(s3c64xx_spi_driver);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
MODULE_LICENSE("GPL");