/*
 * Copyright (C) 2009 Samsung Electronics Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

#include <linux/platform_data/spi-s3c64xx.h>

#ifdef CONFIG_S3C_DMA
#include <mach/dma.h>
#endif

#define MAX_SPI_PORTS		3
#define S3C64XX_SPI_QUIRK_POLL	(1 << 0)

/* Registers and bit-fields */

#define S3C64XX_SPI_CH_CFG		0x00
#define S3C64XX_SPI_CLK_CFG		0x04
#define S3C64XX_SPI_MODE_CFG		0x08
#define S3C64XX_SPI_SLAVE_SEL		0x0C
#define S3C64XX_SPI_INT_EN		0x10
#define S3C64XX_SPI_STATUS		0x14
#define S3C64XX_SPI_TX_DATA		0x18
#define S3C64XX_SPI_RX_DATA		0x1C
#define S3C64XX_SPI_PACKET_CNT		0x20
#define S3C64XX_SPI_PENDING_CLR		0x24
#define S3C64XX_SPI_SWAP_CFG		0x28
#define S3C64XX_SPI_FB_CLK		0x2C

/* SPI_CH_CFG channel configuration bits */
#define S3C64XX_SPI_CH_HS_EN		(1<<6)	/* High Speed Enable */
#define S3C64XX_SPI_CH_SW_RST		(1<<5)
#define S3C64XX_SPI_CH_SLAVE		(1<<4)
#define S3C64XX_SPI_CPOL_L		(1<<3)
#define S3C64XX_SPI_CPHA_B		(1<<2)
#define S3C64XX_SPI_CH_RXCH_ON		(1<<1)
#define S3C64XX_SPI_CH_TXCH_ON		(1<<0)

/* SPI_CLK_CFG clock selection / prescaler bits */
#define S3C64XX_SPI_CLKSEL_SRCMSK	(3<<9)
#define S3C64XX_SPI_CLKSEL_SRCSHFT	9
#define S3C64XX_SPI_ENCLK_ENABLE	(1<<8)
#define S3C64XX_SPI_PSR_MASK		0xff

/* SPI_MODE_CFG transfer size and DMA enable bits */
#define S3C64XX_SPI_MODE_CH_TSZ_BYTE		(0<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD	(1<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_WORD		(2<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_MASK		(3<<29)
#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE		(0<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD	(1<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_WORD		(2<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_MASK		(3<<17)
#define S3C64XX_SPI_MODE_RXDMA_ON		(1<<2)
#define S3C64XX_SPI_MODE_TXDMA_ON		(1<<1)
#define S3C64XX_SPI_MODE_4BURST			(1<<0)

/* SPI_SLAVE_SEL chip-select control bits */
#define S3C64XX_SPI_SLAVE_AUTO			(1<<1)
#define S3C64XX_SPI_SLAVE_SIG_INACT		(1<<0)

/* SPI_INT_EN interrupt enable bits */
#define S3C64XX_SPI_INT_TRAILING_EN		(1<<6)
#define S3C64XX_SPI_INT_RX_OVERRUN_EN		(1<<5)
#define S3C64XX_SPI_INT_RX_UNDERRUN_EN		(1<<4)
#define S3C64XX_SPI_INT_TX_OVERRUN_EN		(1<<3)
#define S3C64XX_SPI_INT_TX_UNDERRUN_EN		(1<<2)
#define S3C64XX_SPI_INT_RX_FIFORDY_EN		(1<<1)
#define S3C64XX_SPI_INT_TX_FIFORDY_EN		(1<<0)

/* SPI_STATUS error/ready flags */
#define S3C64XX_SPI_ST_RX_OVERRUN_ERR		(1<<5)
#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR		(1<<4)
#define S3C64XX_SPI_ST_TX_OVERRUN_ERR		(1<<3)
#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR		(1<<2)
#define S3C64XX_SPI_ST_RX_FIFORDY		(1<<1)
#define S3C64XX_SPI_ST_TX_FIFORDY		(1<<0)

#define S3C64XX_SPI_PACKET_CNT_EN		(1<<16)

/* SPI_PENDING_CLR interrupt-clear bits */
#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR		(1<<4)
#define S3C64XX_SPI_PND_TX_OVERRUN_CLR		(1<<3)
#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR		(1<<2)
#define S3C64XX_SPI_PND_RX_OVERRUN_CLR		(1<<1)
#define S3C64XX_SPI_PND_TRAILING_CLR		(1<<0)

/* SPI_SWAP_CFG byte/bit swap bits */
#define S3C64XX_SPI_SWAP_RX_HALF_WORD		(1<<7)
#define S3C64XX_SPI_SWAP_RX_BYTE		(1<<6)
#define S3C64XX_SPI_SWAP_RX_BIT			(1<<5)
#define S3C64XX_SPI_SWAP_RX_EN			(1<<4)
#define S3C64XX_SPI_SWAP_TX_HALF_WORD		(1<<3)
#define S3C64XX_SPI_SWAP_TX_BYTE		(1<<2)
#define S3C64XX_SPI_SWAP_TX_BIT			(1<<1)
#define S3C64XX_SPI_SWAP_TX_EN			(1<<0)

#define S3C64XX_SPI_FBCLK_MSK		(3<<0)

/*
 * FIFO-level helpers: the FIFO depth differs between SoCs/ports, so the
 * level mask, RX level offset and TX_DONE bit position all come from the
 * per-port configuration (port_conf).
 */
#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
				(1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
					FIFO_LVL_MASK(i))

#define S3C64XX_SPI_MAX_TRAILCNT	0x3ff
#define S3C64XX_SPI_TRAILCNT_OFF	19

#define S3C64XX_SPI_TRAILCNT		S3C64XX_SPI_MAX_TRAILCNT

/* Rough busy-wait iteration count approximating 't' milliseconds. */
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
/* True if this port must be driven by PIO polling (no DMA). */
#define is_polling(x)	(x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)

/* Flags kept in s3c64xx_spi_driver_data.state */
#define RXBUSY    (1<<2)
#define TXBUSY    (1<<3)

/* Per-direction DMA bookkeeping: channel, direction and request number. */
struct s3c64xx_spi_dma_data {
	struct dma_chan *ch;
	enum dma_transfer_direction direction;
	unsigned int dmach;
};

/**
 * struct s3c64xx_spi_port_config - SPI Controller hardware info
 * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
 * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
 * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
 * @quirks: Quirk flags (e.g. S3C64XX_SPI_QUIRK_POLL).
 * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
 * @clk_from_cmu: True, if the controller does not include a clock mux and
 *	prescaler unit.
 *
 * The Samsung s3c64xx SPI controller are used on various Samsung SoC's but
 * differ in some aspects such as the size of the fifo and spi bus clock
 * setup. Such differences are specified to the driver using this structure
 * which is provided as driver data to the driver.
 */
struct s3c64xx_spi_port_config {
	int	fifo_lvl_mask[MAX_SPI_PORTS];
	int	rx_lvl_offset;
	int	tx_st_done;
	int	quirks;
	bool	high_speed;
	bool	clk_from_cmu;
};

/**
 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
 * @clk: Pointer to the spi clock.
 * @src_clk: Pointer to the clock used to generate SPI signals.
 * @master: Pointer to the SPI Protocol master.
 * @cntrlr_info: Platform specific data for the controller this driver manages.
 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
 * @lock: Controller specific lock.
 * @state: Set of FLAGS to indicate status (RXBUSY/TXBUSY).
 * @rx_dma: Controller's DMA bookkeeping for Rx.
 * @tx_dma: Controller's DMA bookkeeping for Tx.
 * @sfr_start: BUS address of SPI controller regs.
 * @regs: Pointer to ioremap'ed controller registers.
 * @xfer_completion: To indicate completion of xfer task.
 * @cur_mode: Stores the active configuration of the controller.
 * @cur_bpw: Stores the active bits per word settings.
 * @cur_speed: Stores the active xfer clock speed.
186 */ 187 struct s3c64xx_spi_driver_data { 188 void __iomem *regs; 189 struct clk *clk; 190 struct clk *src_clk; 191 struct platform_device *pdev; 192 struct spi_master *master; 193 struct s3c64xx_spi_info *cntrlr_info; 194 struct spi_device *tgl_spi; 195 spinlock_t lock; 196 unsigned long sfr_start; 197 struct completion xfer_completion; 198 unsigned state; 199 unsigned cur_mode, cur_bpw; 200 unsigned cur_speed; 201 struct s3c64xx_spi_dma_data rx_dma; 202 struct s3c64xx_spi_dma_data tx_dma; 203 #ifdef CONFIG_S3C_DMA 204 struct samsung_dma_ops *ops; 205 #endif 206 struct s3c64xx_spi_port_config *port_conf; 207 unsigned int port_id; 208 bool cs_gpio; 209 }; 210 211 static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) 212 { 213 void __iomem *regs = sdd->regs; 214 unsigned long loops; 215 u32 val; 216 217 writel(0, regs + S3C64XX_SPI_PACKET_CNT); 218 219 val = readl(regs + S3C64XX_SPI_CH_CFG); 220 val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON); 221 writel(val, regs + S3C64XX_SPI_CH_CFG); 222 223 val = readl(regs + S3C64XX_SPI_CH_CFG); 224 val |= S3C64XX_SPI_CH_SW_RST; 225 val &= ~S3C64XX_SPI_CH_HS_EN; 226 writel(val, regs + S3C64XX_SPI_CH_CFG); 227 228 /* Flush TxFIFO*/ 229 loops = msecs_to_loops(1); 230 do { 231 val = readl(regs + S3C64XX_SPI_STATUS); 232 } while (TX_FIFO_LVL(val, sdd) && loops--); 233 234 if (loops == 0) 235 dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n"); 236 237 /* Flush RxFIFO*/ 238 loops = msecs_to_loops(1); 239 do { 240 val = readl(regs + S3C64XX_SPI_STATUS); 241 if (RX_FIFO_LVL(val, sdd)) 242 readl(regs + S3C64XX_SPI_RX_DATA); 243 else 244 break; 245 } while (loops--); 246 247 if (loops == 0) 248 dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n"); 249 250 val = readl(regs + S3C64XX_SPI_CH_CFG); 251 val &= ~S3C64XX_SPI_CH_SW_RST; 252 writel(val, regs + S3C64XX_SPI_CH_CFG); 253 254 val = readl(regs + S3C64XX_SPI_MODE_CFG); 255 val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON); 256 writel(val, 
regs + S3C64XX_SPI_MODE_CFG); 257 } 258 259 static void s3c64xx_spi_dmacb(void *data) 260 { 261 struct s3c64xx_spi_driver_data *sdd; 262 struct s3c64xx_spi_dma_data *dma = data; 263 unsigned long flags; 264 265 if (dma->direction == DMA_DEV_TO_MEM) 266 sdd = container_of(data, 267 struct s3c64xx_spi_driver_data, rx_dma); 268 else 269 sdd = container_of(data, 270 struct s3c64xx_spi_driver_data, tx_dma); 271 272 spin_lock_irqsave(&sdd->lock, flags); 273 274 if (dma->direction == DMA_DEV_TO_MEM) { 275 sdd->state &= ~RXBUSY; 276 if (!(sdd->state & TXBUSY)) 277 complete(&sdd->xfer_completion); 278 } else { 279 sdd->state &= ~TXBUSY; 280 if (!(sdd->state & RXBUSY)) 281 complete(&sdd->xfer_completion); 282 } 283 284 spin_unlock_irqrestore(&sdd->lock, flags); 285 } 286 287 #ifdef CONFIG_S3C_DMA 288 /* FIXME: remove this section once arch/arm/mach-s3c64xx uses dmaengine */ 289 290 static struct s3c2410_dma_client s3c64xx_spi_dma_client = { 291 .name = "samsung-spi-dma", 292 }; 293 294 static void prepare_dma(struct s3c64xx_spi_dma_data *dma, 295 unsigned len, dma_addr_t buf) 296 { 297 struct s3c64xx_spi_driver_data *sdd; 298 struct samsung_dma_prep info; 299 struct samsung_dma_config config; 300 301 if (dma->direction == DMA_DEV_TO_MEM) { 302 sdd = container_of((void *)dma, 303 struct s3c64xx_spi_driver_data, rx_dma); 304 config.direction = sdd->rx_dma.direction; 305 config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA; 306 config.width = sdd->cur_bpw / 8; 307 sdd->ops->config((enum dma_ch)sdd->rx_dma.ch, &config); 308 } else { 309 sdd = container_of((void *)dma, 310 struct s3c64xx_spi_driver_data, tx_dma); 311 config.direction = sdd->tx_dma.direction; 312 config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA; 313 config.width = sdd->cur_bpw / 8; 314 sdd->ops->config((enum dma_ch)sdd->tx_dma.ch, &config); 315 } 316 317 info.cap = DMA_SLAVE; 318 info.len = len; 319 info.fp = s3c64xx_spi_dmacb; 320 info.fp_param = dma; 321 info.direction = dma->direction; 322 info.buf = buf; 323 
324 sdd->ops->prepare((enum dma_ch)dma->ch, &info); 325 sdd->ops->trigger((enum dma_ch)dma->ch); 326 } 327 328 static int acquire_dma(struct s3c64xx_spi_driver_data *sdd) 329 { 330 struct samsung_dma_req req; 331 struct device *dev = &sdd->pdev->dev; 332 333 sdd->ops = samsung_dma_get_ops(); 334 335 req.cap = DMA_SLAVE; 336 req.client = &s3c64xx_spi_dma_client; 337 338 sdd->rx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request( 339 sdd->rx_dma.dmach, &req, dev, "rx"); 340 sdd->tx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request( 341 sdd->tx_dma.dmach, &req, dev, "tx"); 342 343 return 1; 344 } 345 346 static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) 347 { 348 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi); 349 350 /* 351 * If DMA resource was not available during 352 * probe, no need to continue with dma requests 353 * else Acquire DMA channels 354 */ 355 while (!is_polling(sdd) && !acquire_dma(sdd)) 356 usleep_range(10000, 11000); 357 358 return 0; 359 } 360 361 static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi) 362 { 363 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi); 364 365 /* Free DMA channels */ 366 if (!is_polling(sdd)) { 367 sdd->ops->release((enum dma_ch)sdd->rx_dma.ch, 368 &s3c64xx_spi_dma_client); 369 sdd->ops->release((enum dma_ch)sdd->tx_dma.ch, 370 &s3c64xx_spi_dma_client); 371 } 372 373 return 0; 374 } 375 376 static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd, 377 struct s3c64xx_spi_dma_data *dma) 378 { 379 sdd->ops->stop((enum dma_ch)dma->ch); 380 } 381 #else 382 383 static void prepare_dma(struct s3c64xx_spi_dma_data *dma, 384 unsigned len, dma_addr_t buf) 385 { 386 struct s3c64xx_spi_driver_data *sdd; 387 struct dma_slave_config config; 388 struct dma_async_tx_descriptor *desc; 389 390 memset(&config, 0, sizeof(config)); 391 392 if (dma->direction == DMA_DEV_TO_MEM) { 393 sdd = container_of((void *)dma, 394 struct s3c64xx_spi_driver_data, 
rx_dma); 395 config.direction = dma->direction; 396 config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA; 397 config.src_addr_width = sdd->cur_bpw / 8; 398 config.src_maxburst = 1; 399 dmaengine_slave_config(dma->ch, &config); 400 } else { 401 sdd = container_of((void *)dma, 402 struct s3c64xx_spi_driver_data, tx_dma); 403 config.direction = dma->direction; 404 config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA; 405 config.dst_addr_width = sdd->cur_bpw / 8; 406 config.dst_maxburst = 1; 407 dmaengine_slave_config(dma->ch, &config); 408 } 409 410 desc = dmaengine_prep_slave_single(dma->ch, buf, len, 411 dma->direction, DMA_PREP_INTERRUPT); 412 413 desc->callback = s3c64xx_spi_dmacb; 414 desc->callback_param = dma; 415 416 dmaengine_submit(desc); 417 dma_async_issue_pending(dma->ch); 418 } 419 420 static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) 421 { 422 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi); 423 dma_filter_fn filter = sdd->cntrlr_info->filter; 424 struct device *dev = &sdd->pdev->dev; 425 dma_cap_mask_t mask; 426 int ret; 427 428 if (!is_polling(sdd)) { 429 dma_cap_zero(mask); 430 dma_cap_set(DMA_SLAVE, mask); 431 432 /* Acquire DMA channels */ 433 sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter, 434 (void *)sdd->rx_dma.dmach, dev, "rx"); 435 if (!sdd->rx_dma.ch) { 436 dev_err(dev, "Failed to get RX DMA channel\n"); 437 ret = -EBUSY; 438 goto out; 439 } 440 441 sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter, 442 (void *)sdd->tx_dma.dmach, dev, "tx"); 443 if (!sdd->tx_dma.ch) { 444 dev_err(dev, "Failed to get TX DMA channel\n"); 445 ret = -EBUSY; 446 goto out_rx; 447 } 448 } 449 450 ret = pm_runtime_get_sync(&sdd->pdev->dev); 451 if (ret < 0) { 452 dev_err(dev, "Failed to enable device: %d\n", ret); 453 goto out_tx; 454 } 455 456 return 0; 457 458 out_tx: 459 dma_release_channel(sdd->tx_dma.ch); 460 out_rx: 461 dma_release_channel(sdd->rx_dma.ch); 462 out: 463 return ret; 464 } 465 466 
/*
 * s3c64xx_spi_unprepare_transfer - Release DMA channels (if any) and
 * drop the runtime-PM reference taken in prepare_transfer.
 */
static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);

	/* Free DMA channels */
	if (!is_polling(sdd)) {
		dma_release_channel(sdd->rx_dma.ch);
		dma_release_channel(sdd->tx_dma.ch);
	}

	pm_runtime_put(&sdd->pdev->dev);
	return 0;
}

/* Abort an in-flight DMA transfer on one direction's channel. */
static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
				 struct s3c64xx_spi_dma_data *dma)
{
	dmaengine_terminate_all(dma->ch);
}
#endif

/*
 * enable_datapath - Program MODE_CFG/CH_CFG (and the packet counter)
 * for one transfer, and for PIO Tx pre-fill the TX FIFO.
 * @sdd: Controller driver data.
 * @spi: Target device (unused here beyond the signature).
 * @xfer: The transfer to set up.
 * @dma_mode: Non-zero to use DMA, zero for PIO.
 *
 * For DMA the per-direction descriptors are submitted via prepare_dma();
 * the final MODE_CFG/CH_CFG writes at the end actually start the channel.
 */
static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
				struct spi_device *spi,
				struct spi_transfer *xfer, int dma_mode)
{
	void __iomem *regs = sdd->regs;
	u32 modecfg, chcfg;

	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
	modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);

	chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
	chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;

	if (dma_mode) {
		chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
	} else {
		/* Always shift in data in FIFO, even if xfer is Tx only,
		 * this helps setting PCKT_CNT value for generating clocks
		 * as exactly needed.
		 */
		chcfg |= S3C64XX_SPI_CH_RXCH_ON;
		writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
	}

	if (xfer->tx_buf != NULL) {
		sdd->state |= TXBUSY;
		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
			prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
		} else {
			/* PIO: push the whole buffer into the TX FIFO at
			 * the word size currently configured. */
			switch (sdd->cur_bpw) {
			case 32:
				iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 4);
				break;
			case 16:
				iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 2);
				break;
			default:
				iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len);
				break;
			}
		}
	}

	if (xfer->rx_buf != NULL) {
		sdd->state |= RXBUSY;

		/* High-speed RX path only without CPHA (per the original
		 * author; exact HW constraint not visible here). */
		if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
					&& !(sdd->cur_mode & SPI_CPHA))
			chcfg |= S3C64XX_SPI_CH_HS_EN;

		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
			chcfg |= S3C64XX_SPI_CH_RXCH_ON;
			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
			prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
		}
	}

	writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
}

/*
 * enable_cs - Assert this device's GPIO chip-select, deselecting any
 * device left selected by a previous cs_change hint.
 *
 * NOTE(review): the deselect branch drives spi->cs_gpio, i.e. the NEW
 * device's GPIO, not sdd->tgl_spi->cs_gpio of the device being
 * deselected — looks wrong; confirm against boards using per-device CS
 * GPIOs.
 */
static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
						struct spi_device *spi)
{
	if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
		if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
			/* Deselect the last toggled device */
			if (spi->cs_gpio >= 0)
				gpio_set_value(spi->cs_gpio,
					spi->mode & SPI_CS_HIGH ? 0 : 1);
		}
		sdd->tgl_spi = NULL;
	}

	if (spi->cs_gpio >= 0)
		gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 1 : 0);
}

/*
 * s3c64xx_spi_wait_for_timeout - Busy-wait until the RX FIFO holds a
 * full FIFO's worth of data or the timeout elapses.
 * @sdd: Controller driver data.
 * @timeout_ms: Timeout in milliseconds; 0 means a single-pass check.
 *
 * Returns the RX FIFO level actually observed (may be less than the
 * FIFO depth on timeout), so callers know how many bytes to drain.
 */
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
					int timeout_ms)
{
	void __iomem *regs = sdd->regs;
	unsigned long val = 1;
	u32 status;

	/* max fifo depth available */
	u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;

	if (timeout_ms)
		val = msecs_to_loops(timeout_ms);

	do {
		status = readl(regs + S3C64XX_SPI_STATUS);
	} while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);

	/* return the actual received data length */
	return RX_FIFO_LVL(status, sdd);
}

/*
 * wait_for_xfer - Wait for completion of one transfer.
 * @sdd: Controller driver data.
 * @xfer: The in-flight transfer.
 * @dma_mode: Non-zero if the transfer was started via DMA.
 *
 * DMA mode: sleeps on xfer_completion (signalled by s3c64xx_spi_dmacb),
 * then for Tx-only transfers additionally spins until the TX FIFO has
 * drained onto the bus.  Returns -EIO on timeout.
 *
 * PIO mode: spins until the RX FIFO level reaches xfer->len (or the
 * loop budget runs out), then drains the FIFO into rx_buf, looping for
 * transfers larger than the FIFO.  Always returns 0 on this path.
 */
static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
			 struct spi_transfer *xfer, int dma_mode)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 10; /* some tolerance */

	if (dma_mode) {
		val = msecs_to_jiffies(ms) + 10;
		val = wait_for_completion_timeout(&sdd->xfer_completion, val);
	} else {
		u32 status;
		val = msecs_to_loops(ms);
		do {
			status = readl(regs + S3C64XX_SPI_STATUS);
		} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
	}

	if (dma_mode) {
		u32 status;

		/*
		 * If the previous xfer was completed within timeout, then
		 * proceed further else return -EIO.
		 * DmaTx returns after simply writing data in the FIFO,
		 * w/o waiting for real transmission on the bus to finish.
		 * DmaRx returns only after Dma read data from FIFO which
		 * needs bus transmission to finish, so we don't worry if
		 * Xfer involved Rx(with or without Tx).
		 */
		if (val && !xfer->rx_buf) {
			val = msecs_to_loops(10);
			status = readl(regs + S3C64XX_SPI_STATUS);
			while ((TX_FIFO_LVL(status, sdd)
				|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
			       && --val) {
				cpu_relax();
				status = readl(regs + S3C64XX_SPI_STATUS);
			}

		}

		/* If timed out while checking rx/tx status return error */
		if (!val)
			return -EIO;
	} else {
		int loops;
		u32 cpy_len;
		u8 *buf;

		/* If it was only Tx */
		if (!xfer->rx_buf) {
			sdd->state &= ~TXBUSY;
			return 0;
		}

		/*
		 * If the receive length is bigger than the controller fifo
		 * size, calculate the loops and read the fifo as many times.
		 * loops = length / max fifo size (calculated by using the
		 * fifo mask).
		 * For any size less than the fifo size the below code is
		 * executed atleast once.
		 */
		loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
		buf = xfer->rx_buf;
		do {
			/* wait for data to be received in the fifo */
			cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
						(loops ? ms : 0));

			switch (sdd->cur_bpw) {
			case 32:
				ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
					buf, cpy_len / 4);
				break;
			case 16:
				ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
					buf, cpy_len / 2);
				break;
			default:
				ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
					buf, cpy_len);
				break;
			}

			buf = buf + cpy_len;
		} while (loops--);
		sdd->state &= ~RXBUSY;
	}

	return 0;
}

/* Deassert this device's GPIO chip-select and forget any toggle hint. */
static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
						struct spi_device *spi)
{
	if (sdd->tgl_spi == spi)
		sdd->tgl_spi = NULL;

	if (spi->cs_gpio >= 0)
		gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
}

/*
 * s3c64xx_spi_config - Push the cached cur_mode/cur_bpw/cur_speed
 * settings into the controller registers.
 *
 * The SPI clock is disabled around reconfiguration.  On CMU-clocked
 * controllers the rate is set on src_clk directly (x2 because of the
 * half-rate divider in front of the SPI block, per the comment below);
 * otherwise the internal prescaler (PSR) is programmed.
 */
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	u32 val;

	/* Disable Clock */
	if (sdd->port_conf->clk_from_cmu) {
		clk_disable_unprepare(sdd->src_clk);
	} else {
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	/* Set Polarity and Phase */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_SLAVE |
			S3C64XX_SPI_CPOL_L |
			S3C64XX_SPI_CPHA_B);

	if (sdd->cur_mode & SPI_CPOL)
		val |= S3C64XX_SPI_CPOL_L;

	if (sdd->cur_mode & SPI_CPHA)
		val |= S3C64XX_SPI_CPHA_B;

	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Set Channel & DMA Mode */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
			| S3C64XX_SPI_MODE_CH_TSZ_MASK);

	switch (sdd->cur_bpw) {
	case 32:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
		break;
	case 16:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
		break;
	default:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
		val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
		break;
	}

	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	if (sdd->port_conf->clk_from_cmu) {
		/* Configure Clock */
		/* There is half-multiplier before the SPI */
		clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
		/* Enable Clock */
		clk_prepare_enable(sdd->src_clk);
	} else {
		/* Configure Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_PSR_MASK;
		val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
				& S3C64XX_SPI_PSR_MASK);
		writel(val, regs + S3C64XX_SPI_CLK_CFG);

		/* Enable Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val |= S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}
}
#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32) 776 777 static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, 778 struct spi_message *msg) 779 { 780 struct device *dev = &sdd->pdev->dev; 781 struct spi_transfer *xfer; 782 783 if (is_polling(sdd) || msg->is_dma_mapped) 784 return 0; 785 786 /* First mark all xfer unmapped */ 787 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 788 xfer->rx_dma = XFER_DMAADDR_INVALID; 789 xfer->tx_dma = XFER_DMAADDR_INVALID; 790 } 791 792 /* Map until end or first fail */ 793 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 794 795 if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1)) 796 continue; 797 798 if (xfer->tx_buf != NULL) { 799 xfer->tx_dma = dma_map_single(dev, 800 (void *)xfer->tx_buf, xfer->len, 801 DMA_TO_DEVICE); 802 if (dma_mapping_error(dev, xfer->tx_dma)) { 803 dev_err(dev, "dma_map_single Tx failed\n"); 804 xfer->tx_dma = XFER_DMAADDR_INVALID; 805 return -ENOMEM; 806 } 807 } 808 809 if (xfer->rx_buf != NULL) { 810 xfer->rx_dma = dma_map_single(dev, xfer->rx_buf, 811 xfer->len, DMA_FROM_DEVICE); 812 if (dma_mapping_error(dev, xfer->rx_dma)) { 813 dev_err(dev, "dma_map_single Rx failed\n"); 814 dma_unmap_single(dev, xfer->tx_dma, 815 xfer->len, DMA_TO_DEVICE); 816 xfer->tx_dma = XFER_DMAADDR_INVALID; 817 xfer->rx_dma = XFER_DMAADDR_INVALID; 818 return -ENOMEM; 819 } 820 } 821 } 822 823 return 0; 824 } 825 826 static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd, 827 struct spi_message *msg) 828 { 829 struct device *dev = &sdd->pdev->dev; 830 struct spi_transfer *xfer; 831 832 if (is_polling(sdd) || msg->is_dma_mapped) 833 return; 834 835 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 836 837 if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1)) 838 continue; 839 840 if (xfer->rx_buf != NULL 841 && xfer->rx_dma != XFER_DMAADDR_INVALID) 842 dma_unmap_single(dev, xfer->rx_dma, 843 xfer->len, DMA_FROM_DEVICE); 844 845 if (xfer->tx_buf != NULL 846 && xfer->tx_dma 
!= XFER_DMAADDR_INVALID) 847 dma_unmap_single(dev, xfer->tx_dma, 848 xfer->len, DMA_TO_DEVICE); 849 } 850 } 851 852 static int s3c64xx_spi_prepare_message(struct spi_master *master, 853 struct spi_message *msg) 854 { 855 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 856 struct spi_device *spi = msg->spi; 857 struct s3c64xx_spi_csinfo *cs = spi->controller_data; 858 859 /* If Master's(controller) state differs from that needed by Slave */ 860 if (sdd->cur_speed != spi->max_speed_hz 861 || sdd->cur_mode != spi->mode 862 || sdd->cur_bpw != spi->bits_per_word) { 863 sdd->cur_bpw = spi->bits_per_word; 864 sdd->cur_speed = spi->max_speed_hz; 865 sdd->cur_mode = spi->mode; 866 s3c64xx_spi_config(sdd); 867 } 868 869 /* Map all the transfers if needed */ 870 if (s3c64xx_spi_map_mssg(sdd, msg)) { 871 dev_err(&spi->dev, 872 "Xfer: Unable to map message buffers!\n"); 873 return -ENOMEM; 874 } 875 876 /* Configure feedback delay */ 877 writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK); 878 879 return 0; 880 } 881 882 static int s3c64xx_spi_transfer_one(struct spi_master *master, 883 struct spi_device *spi, 884 struct spi_transfer *xfer) 885 { 886 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 887 int status; 888 u32 speed; 889 u8 bpw; 890 unsigned long flags; 891 int use_dma; 892 893 reinit_completion(&sdd->xfer_completion); 894 895 /* Only BPW and Speed may change across transfers */ 896 bpw = xfer->bits_per_word; 897 speed = xfer->speed_hz ? 
: spi->max_speed_hz; 898 899 if (xfer->len % (bpw / 8)) { 900 dev_err(&spi->dev, 901 "Xfer length(%u) not a multiple of word size(%u)\n", 902 xfer->len, bpw / 8); 903 return -EIO; 904 } 905 906 if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) { 907 sdd->cur_bpw = bpw; 908 sdd->cur_speed = speed; 909 s3c64xx_spi_config(sdd); 910 } 911 912 /* Polling method for xfers not bigger than FIFO capacity */ 913 use_dma = 0; 914 if (!is_polling(sdd) && 915 (sdd->rx_dma.ch && sdd->tx_dma.ch && 916 (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1)))) 917 use_dma = 1; 918 919 spin_lock_irqsave(&sdd->lock, flags); 920 921 /* Pending only which is to be done */ 922 sdd->state &= ~RXBUSY; 923 sdd->state &= ~TXBUSY; 924 925 enable_datapath(sdd, spi, xfer, use_dma); 926 927 /* Start the signals */ 928 writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL); 929 930 /* Start the signals */ 931 writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL); 932 933 spin_unlock_irqrestore(&sdd->lock, flags); 934 935 status = wait_for_xfer(sdd, xfer, use_dma); 936 937 if (status) { 938 dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n", 939 xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0, 940 (sdd->state & RXBUSY) ? 'f' : 'p', 941 (sdd->state & TXBUSY) ? 
'f' : 'p', 942 xfer->len); 943 944 if (use_dma) { 945 if (xfer->tx_buf != NULL 946 && (sdd->state & TXBUSY)) 947 s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma); 948 if (xfer->rx_buf != NULL 949 && (sdd->state & RXBUSY)) 950 s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma); 951 } 952 } else { 953 flush_fifo(sdd); 954 } 955 956 return status; 957 } 958 959 static int s3c64xx_spi_unprepare_message(struct spi_master *master, 960 struct spi_message *msg) 961 { 962 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 963 964 s3c64xx_spi_unmap_mssg(sdd, msg); 965 966 return 0; 967 } 968 969 static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata( 970 struct spi_device *spi) 971 { 972 struct s3c64xx_spi_csinfo *cs; 973 struct device_node *slave_np, *data_np = NULL; 974 struct s3c64xx_spi_driver_data *sdd; 975 u32 fb_delay = 0; 976 977 sdd = spi_master_get_devdata(spi->master); 978 slave_np = spi->dev.of_node; 979 if (!slave_np) { 980 dev_err(&spi->dev, "device node not found\n"); 981 return ERR_PTR(-EINVAL); 982 } 983 984 data_np = of_get_child_by_name(slave_np, "controller-data"); 985 if (!data_np) { 986 dev_err(&spi->dev, "child node 'controller-data' not found\n"); 987 return ERR_PTR(-EINVAL); 988 } 989 990 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 991 if (!cs) { 992 dev_err(&spi->dev, "could not allocate memory for controller data\n"); 993 of_node_put(data_np); 994 return ERR_PTR(-ENOMEM); 995 } 996 997 /* The CS line is asserted/deasserted by the gpio pin */ 998 if (sdd->cs_gpio) 999 cs->line = of_get_named_gpio(data_np, "cs-gpio", 0); 1000 1001 if (!gpio_is_valid(cs->line)) { 1002 dev_err(&spi->dev, "chip select gpio is not specified or invalid\n"); 1003 kfree(cs); 1004 of_node_put(data_np); 1005 return ERR_PTR(-EINVAL); 1006 } 1007 1008 of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay); 1009 cs->fb_delay = fb_delay; 1010 of_node_put(data_np); 1011 return cs; 1012 } 1013 1014 /* 1015 * Here we only check the validity of requested configuration 
 * and save the configuration in a local data-structure.
 * The controller is actually configured only just before we
 * get a message to transfer.
 */
static int s3c64xx_spi_setup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_info *sci;
	int err;

	sdd = spi_master_get_devdata(spi->master);
	/* No board-supplied controller data: parse it from the DT node. */
	if (!cs && spi->dev.of_node) {
		cs = s3c64xx_get_slave_ctrldata(spi);
		spi->controller_data = cs;
	}

	if (IS_ERR_OR_NULL(cs)) {
		dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
		return -ENODEV;
	}

	/* First setup() call for this device: claim the CS GPIO once. */
	if (!spi_get_ctldata(spi)) {
		/* Request gpio only if cs line is asserted by gpio pins */
		if (sdd->cs_gpio) {
			err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH,
					dev_name(&spi->dev));
			if (err) {
				dev_err(&spi->dev,
					"Failed to get /CS gpio [%d]: %d\n",
					cs->line, err);
				goto err_gpio_req;
			}

			spi->cs_gpio = cs->line;
		}

		spi_set_ctldata(spi, cs);
	}

	sci = sdd->cntrlr_info;

	pm_runtime_get_sync(&sdd->pdev->dev);

	/* Check if we can provide the requested rate */
	if (!sdd->port_conf->clk_from_cmu) {
		u32 psr, speed;

		/* Max possible */
		speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);

		if (spi->max_speed_hz > speed)
			spi->max_speed_hz = speed;

		/* Nearest prescaler: SPI clock = src_clk / 2 / (psr + 1) */
		psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
		psr &= S3C64XX_SPI_PSR_MASK;
		if (psr == S3C64XX_SPI_PSR_MASK)
			psr--;

		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz < speed) {
			/* Rounded down too far: slow down by one step */
			if (psr+1 < S3C64XX_SPI_PSR_MASK) {
				psr++;
			} else {
				err = -EINVAL;
				goto setup_exit;
			}
		}

		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz >= speed) {
			spi->max_speed_hz = speed;
		} else {
			dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
				spi->max_speed_hz);
			err = -EINVAL;
			goto setup_exit;
		}
	}

	pm_runtime_put(&sdd->pdev->dev);
	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
	disable_cs(sdd, spi);
	return 0;

setup_exit:
	pm_runtime_put(&sdd->pdev->dev);
	/* setup() returns with device de-selected */
	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
	disable_cs(sdd, spi);

	/*
	 * NOTE(review): this unwinds the GPIO and ctldata even when they
	 * were claimed by an earlier *successful* setup() call (the
	 * !spi_get_ctldata() branch above was skipped) — a later failing
	 * setup() would then free resources still in use.  Verify.
	 */
	gpio_free(cs->line);
	spi_set_ctldata(spi, NULL);

err_gpio_req:
	/* cs was allocated by s3c64xx_get_slave_ctrldata() only for DT */
	if (spi->dev.of_node)
		kfree(cs);

	return err;
}

/* Release the per-device chip-select GPIO and controller data. */
static void s3c64xx_spi_cleanup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
	struct s3c64xx_spi_driver_data *sdd;

	sdd = spi_master_get_devdata(spi->master);
	/*
	 * NOTE(review): GPIO 0 is a valid GPIO and an unset spi->cs_gpio
	 * may hold a negative errno — gpio_is_valid(spi->cs_gpio) looks
	 * like the safer test here.  TODO confirm against the SPI core's
	 * cs_gpio initialisation.
	 */
	if (spi->cs_gpio) {
		gpio_free(spi->cs_gpio);
		if (spi->dev.of_node)
			kfree(cs);
	}
	spi_set_ctldata(spi, NULL);
}

/*
 * Error interrupt handler: reports FIFO over/underruns and acks them
 * via the PENDING_CLR register (bits must be set and then cleared).
 */
static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
{
	struct s3c64xx_spi_driver_data *sdd = data;
	struct spi_master *spi = sdd->master;
	unsigned int val, clr = 0;

	val = readl(sdd->regs + S3C64XX_SPI_STATUS);

	if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
		clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
		dev_err(&spi->dev, "RX overrun\n");
	}
	if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
		dev_err(&spi->dev, "RX underrun\n");
	}
	if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
		dev_err(&spi->dev, "TX overrun\n");
	}
	if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
		dev_err(&spi->dev, "TX underrun\n");
	}

	/* Clear the pending irq by setting and then clearing it */
	writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);

	return IRQ_HANDLED;
}

/*
 * Reset the controller to a known state: slave signal inactive, all
 * interrupts masked, clock source selected (unless clocked from the
 * CMU), pending irqs acked, swap disabled, trailing-count programmed,
 * FIFOs flushed.  Register write order follows the original sequence.
 */
static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
{
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	void __iomem *regs = sdd->regs;
	unsigned int val;

	sdd->cur_speed = 0;

	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	/* Disable Interrupts - we use Polling if not DMA mode */
	writel(0, regs + S3C64XX_SPI_INT_EN);

	if (!sdd->port_conf->clk_from_cmu)
		writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
				regs + S3C64XX_SPI_CLK_CFG);
	writel(0, regs + S3C64XX_SPI_MODE_CFG);
	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	/* Clear any irq pending bits, should set and clear the bits */
	val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
		S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
		S3C64XX_SPI_PND_TX_OVERRUN_CLR |
		S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
	writel(val, regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, regs + S3C64XX_SPI_PENDING_CLR);

	writel(0, regs + S3C64XX_SPI_SWAP_CFG);

	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~S3C64XX_SPI_MODE_4BURST;
	val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	flush_fifo(sdd);
}

#ifdef CONFIG_OF
/*
 * Build controller platform data from the device-tree node.  Missing
 * optional properties fall back to defaults (clock index 0, 1 CS line)
 * with a warning.  Memory is devm-managed.
 */
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
	struct s3c64xx_spi_info *sci;
	u32 temp;

	sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
	if (!sci) {
		dev_err(dev, "memory allocation for spi_info failed\n");
		return ERR_PTR(-ENOMEM);
	}

	if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
		dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
		sci->src_clk_nr = 0;
	} else {
		sci->src_clk_nr = temp;
	}

	if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
		dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
		sci->num_cs = 1;
	} else {
		sci->num_cs = temp;
	}

	return sci;
}
#else
/* Non-DT build: controller info comes straight from platform data. */
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
	return dev_get_platdata(dev);
}
#endif

static const struct of_device_id s3c64xx_spi_dt_match[];

/*
 * Pick the per-SoC port configuration: from the OF match table when
 * probed via device tree, otherwise from the platform_device id table.
 */
static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
						struct platform_device *pdev)
{
#ifdef CONFIG_OF
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node);
		return (struct s3c64xx_spi_port_config *)match->data;
	}
#endif
	return (struct s3c64xx_spi_port_config *)
			 platform_get_device_id(pdev)->driver_data;
}

/*
 * Controller probe: gather resources (MMIO, IRQ, DMA channels), allocate
 * and populate the spi_master, map registers, enable the bus and source
 * clocks, reset the hardware, install the error IRQ handler and register
 * the master.  Unwinds via the err3/err2/err0 label ladder at the bottom.
 */
static int s3c64xx_spi_probe(struct platform_device *pdev)
{
	struct resource	*mem_res;
	struct resource	*res;
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
	struct spi_master *master;
	int ret, irq;
	char clk_name[16];

	if (!sci && pdev->dev.of_node) {
		sci = s3c64xx_spi_parse_dt(&pdev->dev);
		if (IS_ERR(sci))
			return PTR_ERR(sci);
	}

	if (!sci) {
		dev_err(&pdev->dev, "platform_data missing!\n");
		return -ENODEV;
	}

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (mem_res == NULL) {
		dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
		return -ENXIO;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
		return irq;
	}

	master = spi_alloc_master(&pdev->dev,
				sizeof(struct s3c64xx_spi_driver_data));
	if (master == NULL) {
		dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, master);

	sdd = spi_master_get_devdata(master);
	sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
	sdd->master = master;
	sdd->cntrlr_info = sci;
	sdd->pdev = pdev;
	sdd->sfr_start = mem_res->start;
	/* Assume GPIO chip selects unless the DT says otherwise */
	sdd->cs_gpio = true;
	if (pdev->dev.of_node) {
		if (!of_find_property(pdev->dev.of_node, "cs-gpio", NULL))
			sdd->cs_gpio = false;

		/* Bus number comes from the "spi" alias in DT */
		ret = of_alias_get_id(pdev->dev.of_node, "spi");
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
				ret);
			goto err0;
		}
		sdd->port_id = ret;
	} else {
		sdd->port_id = pdev->id;
	}

	sdd->cur_bpw = 8;

	/* Legacy (non-DT) boards pass DMA channels as platform resources */
	if (!sdd->pdev->dev.of_node) {
		res = platform_get_resource(pdev, IORESOURCE_DMA,  0);
		if (!res) {
			dev_warn(&pdev->dev, "Unable to get SPI tx dma resource. Switching to poll mode\n");
			/*
			 * NOTE(review): this writes into the shared static
			 * port-config, so it flips *every* controller using
			 * that config into poll mode — a per-device quirks
			 * copy looks safer.  Confirm intent.
			 */
			sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
		} else
			sdd->tx_dma.dmach = res->start;

		res = platform_get_resource(pdev, IORESOURCE_DMA,  1);
		if (!res) {
			dev_warn(&pdev->dev, "Unable to get SPI rx dma resource. Switching to poll mode\n");
			sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
		} else
			sdd->rx_dma.dmach = res->start;
	}

	sdd->tx_dma.direction = DMA_MEM_TO_DEV;
	sdd->rx_dma.direction = DMA_DEV_TO_MEM;

	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = sdd->port_id;
	master->setup = s3c64xx_spi_setup;
	master->cleanup = s3c64xx_spi_cleanup;
	master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
	master->prepare_message = s3c64xx_spi_prepare_message;
	master->transfer_one = s3c64xx_spi_transfer_one;
	master->unprepare_message = s3c64xx_spi_unprepare_message;
	master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
	master->num_chipselect = sci->num_cs;
	master->dma_alignment = 8;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
					SPI_BPW_MASK(8);
	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->auto_runtime_pm = true;

	sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
	if (IS_ERR(sdd->regs)) {
		ret = PTR_ERR(sdd->regs);
		goto err0;
	}

	if (sci->cfg_gpio && sci->cfg_gpio()) {
		dev_err(&pdev->dev, "Unable to config gpio\n");
		ret = -EBUSY;
		goto err0;
	}

	/* Setup clocks */
	sdd->clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(sdd->clk)) {
		dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
		ret = PTR_ERR(sdd->clk);
		goto err0;
	}

	if (clk_prepare_enable(sdd->clk)) {
		dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
		ret = -EBUSY;
		goto err0;
	}

	/* Source clock name is selected by the configured clock index */
	sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
	sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
	if (IS_ERR(sdd->src_clk)) {
		dev_err(&pdev->dev,
			"Unable to acquire clock '%s'\n", clk_name);
		ret = PTR_ERR(sdd->src_clk);
		goto err2;
	}

	if (clk_prepare_enable(sdd->src_clk)) {
		dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
		ret = -EBUSY;
		goto err2;
	}

	/* Set up default mode */
	s3c64xx_spi_hwinit(sdd, sdd->port_id);

	spin_lock_init(&sdd->lock);
	init_completion(&sdd->xfer_completion);

	ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
				"spi-s3c64xx", sdd);
	if (ret != 0) {
		dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
			irq, ret);
		goto err3;
	}

	/* Unmask the FIFO error interrupts now the handler is installed */
	writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
	       S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
	       sdd->regs + S3C64XX_SPI_INT_EN);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret != 0) {
		dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
		goto err3;
	}

	dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
					sdd->port_id, master->num_chipselect);
	dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tDMA=[Rx-%d, Tx-%d]\n",
					mem_res,
					sdd->rx_dma.dmach, sdd->tx_dma.dmach);

	return 0;

/* Error unwind: err3 falls through err2 so both clocks are disabled */
err3:
	clk_disable_unprepare(sdd->src_clk);
err2:
	clk_disable_unprepare(sdd->clk);
err0:
	spi_master_put(master);

	return ret;
}

/*
 * Controller removal: mask interrupts and drop both clock references.
 * NOTE(review): the INT_EN write happens after pm_runtime_disable() —
 * if the device was runtime-suspended the clocks may already be gated;
 * verify register access is still safe here.
 */
static int s3c64xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	writel(0, sdd->regs + S3C64XX_SPI_INT_EN);

	clk_disable_unprepare(sdd->src_clk);

	clk_disable_unprepare(sdd->clk);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: quiesce the SPI queue, then gate the clocks unless
 * runtime PM already has them off.
 */
static int s3c64xx_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	int ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(sdd->clk);
		clk_disable_unprepare(sdd->src_clk);
	}

	sdd->cur_speed = 0; /* Output Clock is stopped */

	return 0;
}

/*
 * System resume: reconfigure board GPIO muxing, re-enable clocks (if
 * runtime-active), re-initialise the hardware and restart the queue.
 */
static int s3c64xx_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;

	if (sci->cfg_gpio)
		sci->cfg_gpio();

	if (!pm_runtime_suspended(dev)) {
		clk_prepare_enable(sdd->src_clk);
		clk_prepare_enable(sdd->clk);
	}

	s3c64xx_spi_hwinit(sdd, sdd->port_id);

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_RUNTIME
/* Runtime suspend: gate both the bus and source clocks. */
static int s3c64xx_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	clk_disable_unprepare(sdd->clk);
	clk_disable_unprepare(sdd->src_clk);

	return 0;
}

/* Runtime resume: re-enable clocks, unwinding src_clk if clk fails. */
static int s3c64xx_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(sdd->src_clk);
	if (ret != 0)
		return ret;

	ret = clk_prepare_enable(sdd->clk);
	if (ret != 0) {
		clk_disable_unprepare(sdd->src_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops s3c64xx_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
	SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
			   s3c64xx_spi_runtime_resume, NULL)
};

/*
 * Per-SoC port configurations.  fifo_lvl_mask entries are indexed by
 * port number; rx_lvl_offset/tx_st_done are bit positions in the
 * STATUS register, which differ between SoC generations.
 */
static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
	.high_speed	= true,
};

static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f, 0x7F },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
};

static struct s3c64xx_spi_port_config s5p64x0_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
};

static struct s3c64xx_spi_port_config s5pc100_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f, 0x7F },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
	.high_speed	= true,
};

static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
};

/* Exynos4: SPI clock comes from the CMU, so no local prescaler setup */
static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};

/* Exynos5440: polling-only operation (no DMA) */
static struct s3c64xx_spi_port_config exynos5440_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.quirks		= S3C64XX_SPI_QUIRK_POLL,
};

/* Legacy (non-DT) platform-device id table */
static struct platform_device_id s3c64xx_spi_driver_ids[] = {
	{
		.name		= "s3c2443-spi",
		.driver_data	= (kernel_ulong_t)&s3c2443_spi_port_config,
	}, {
		.name		= "s3c6410-spi",
		.driver_data	= (kernel_ulong_t)&s3c6410_spi_port_config,
	}, {
		.name		= "s5p64x0-spi",
		.driver_data	= (kernel_ulong_t)&s5p64x0_spi_port_config,
	}, {
		.name		= "s5pc100-spi",
		.driver_data	= (kernel_ulong_t)&s5pc100_spi_port_config,
	}, {
		.name		= "s5pv210-spi",
		.driver_data	= (kernel_ulong_t)&s5pv210_spi_port_config,
	}, {
		.name		= "exynos4210-spi",
		.driver_data	= (kernel_ulong_t)&exynos4_spi_port_config,
	},
	{ },
};

/* Device-tree match table; .data points at the per-SoC port config */
static const struct of_device_id s3c64xx_spi_dt_match[] = {
	{ .compatible = "samsung,s3c2443-spi",
			.data = (void *)&s3c2443_spi_port_config,
	},
	{ .compatible = "samsung,s3c6410-spi",
			.data = (void *)&s3c6410_spi_port_config,
	},
	{ .compatible = "samsung,s5pc100-spi",
			.data = (void *)&s5pc100_spi_port_config,
	},
	{ .compatible = "samsung,s5pv210-spi",
			.data = (void *)&s5pv210_spi_port_config,
	},
	{ .compatible = "samsung,exynos4210-spi",
			.data = (void *)&exynos4_spi_port_config,
	},
	{ .compatible = "samsung,exynos5440-spi",
			.data = (void *)&exynos5440_spi_port_config,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);

static struct platform_driver s3c64xx_spi_driver = {
	.driver = {
		.name	= "s3c64xx-spi",
		.owner = THIS_MODULE,
		.pm = &s3c64xx_spi_pm,
		.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
	},
	.probe = s3c64xx_spi_probe,
	.remove = s3c64xx_spi_remove,
	.id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");

module_platform_driver(s3c64xx_spi_driver);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
MODULE_LICENSE("GPL");