/*
 * Copyright (C) 2009 Samsung Electronics Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

#include <linux/platform_data/spi-s3c64xx.h>

#ifdef CONFIG_S3C_DMA
#include <mach/dma.h>
#endif

#define MAX_SPI_PORTS		3
/* Controller can only do PIO/polling transfers (no DMA) */
#define S3C64XX_SPI_QUIRK_POLL	(1 << 0)

/* Registers and bit-fields */

#define S3C64XX_SPI_CH_CFG		0x00
#define S3C64XX_SPI_CLK_CFG		0x04
#define S3C64XX_SPI_MODE_CFG		0x08
#define S3C64XX_SPI_SLAVE_SEL		0x0C
#define S3C64XX_SPI_INT_EN		0x10
#define S3C64XX_SPI_STATUS		0x14
#define S3C64XX_SPI_TX_DATA		0x18
#define S3C64XX_SPI_RX_DATA		0x1C
#define S3C64XX_SPI_PACKET_CNT		0x20
#define S3C64XX_SPI_PENDING_CLR		0x24
#define S3C64XX_SPI_SWAP_CFG		0x28
#define S3C64XX_SPI_FB_CLK		0x2C

/* CH_CFG register bits */
#define S3C64XX_SPI_CH_HS_EN		(1<<6)	/* High Speed Enable */
#define S3C64XX_SPI_CH_SW_RST		(1<<5)
#define S3C64XX_SPI_CH_SLAVE		(1<<4)
#define S3C64XX_SPI_CPOL_L		(1<<3)
#define S3C64XX_SPI_CPHA_B		(1<<2)
#define S3C64XX_SPI_CH_RXCH_ON		(1<<1)
#define S3C64XX_SPI_CH_TXCH_ON		(1<<0)

/* CLK_CFG register bits */
#define S3C64XX_SPI_CLKSEL_SRCMSK	(3<<9)
#define S3C64XX_SPI_CLKSEL_SRCSHFT	9
#define S3C64XX_SPI_ENCLK_ENABLE	(1<<8)
#define S3C64XX_SPI_PSR_MASK		0xff

/* MODE_CFG register bits */
#define S3C64XX_SPI_MODE_CH_TSZ_BYTE		(0<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD	(1<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_WORD		(2<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_MASK		(3<<29)
#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE		(0<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD	(1<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_WORD		(2<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_MASK		(3<<17)
#define S3C64XX_SPI_MODE_RXDMA_ON		(1<<2)
#define S3C64XX_SPI_MODE_TXDMA_ON		(1<<1)
#define S3C64XX_SPI_MODE_4BURST			(1<<0)

/* SLAVE_SEL register bits */
#define S3C64XX_SPI_SLAVE_AUTO			(1<<1)
#define S3C64XX_SPI_SLAVE_SIG_INACT		(1<<0)

/* INT_EN register bits */
#define S3C64XX_SPI_INT_TRAILING_EN		(1<<6)
#define S3C64XX_SPI_INT_RX_OVERRUN_EN		(1<<5)
#define S3C64XX_SPI_INT_RX_UNDERRUN_EN		(1<<4)
#define S3C64XX_SPI_INT_TX_OVERRUN_EN		(1<<3)
#define S3C64XX_SPI_INT_TX_UNDERRUN_EN		(1<<2)
#define S3C64XX_SPI_INT_RX_FIFORDY_EN		(1<<1)
#define S3C64XX_SPI_INT_TX_FIFORDY_EN		(1<<0)

/* STATUS register bits */
#define S3C64XX_SPI_ST_RX_OVERRUN_ERR		(1<<5)
#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR		(1<<4)
#define S3C64XX_SPI_ST_TX_OVERRUN_ERR		(1<<3)
#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR		(1<<2)
#define S3C64XX_SPI_ST_RX_FIFORDY		(1<<1)
#define S3C64XX_SPI_ST_TX_FIFORDY		(1<<0)

#define S3C64XX_SPI_PACKET_CNT_EN		(1<<16)

/* PENDING_CLR register bits */
#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR		(1<<4)
#define S3C64XX_SPI_PND_TX_OVERRUN_CLR		(1<<3)
#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR		(1<<2)
#define S3C64XX_SPI_PND_RX_OVERRUN_CLR		(1<<1)
#define S3C64XX_SPI_PND_TRAILING_CLR		(1<<0)

/* SWAP_CFG register bits */
#define S3C64XX_SPI_SWAP_RX_HALF_WORD		(1<<7)
#define S3C64XX_SPI_SWAP_RX_BYTE		(1<<6)
#define S3C64XX_SPI_SWAP_RX_BIT			(1<<5)
#define S3C64XX_SPI_SWAP_RX_EN			(1<<4)
#define S3C64XX_SPI_SWAP_TX_HALF_WORD		(1<<3)
#define S3C64XX_SPI_SWAP_TX_BYTE		(1<<2)
#define S3C64XX_SPI_SWAP_TX_BIT			(1<<1)
#define S3C64XX_SPI_SWAP_TX_EN			(1<<0)

#define S3C64XX_SPI_FBCLK_MSK		(3<<0)

/* Per-port FIFO level extraction helpers (geometry comes from port_conf) */
#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
				(1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
					FIFO_LVL_MASK(i))

#define S3C64XX_SPI_MAX_TRAILCNT	0x3ff
#define S3C64XX_SPI_TRAILCNT_OFF	19

#define S3C64XX_SPI_TRAILCNT		S3C64XX_SPI_MAX_TRAILCNT

/* Rough busy-wait iteration count for 't' milliseconds */
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
#define is_polling(x)	(x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)

/* Flags in s3c64xx_spi_driver_data.state */
#define RXBUSY    (1<<2)
#define TXBUSY    (1<<3)

struct s3c64xx_spi_dma_data {
	struct dma_chan *ch;		/* dmaengine channel (or legacy handle) */
	enum dma_transfer_direction direction;
	unsigned int dmach;		/* platform DMA request line */
};

/**
 * struct s3c64xx_spi_port_config - SPI Controller hardware info
 * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
 * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
 * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
 * @quirks: Bitmask of known quirks (S3C64XX_SPI_QUIRK_*).
 * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
 * @clk_from_cmu: True, if the controller does not include a clock mux and
 *	prescaler unit.
 *
 * The Samsung s3c64xx SPI controller are used on various Samsung SoC's but
 * differ in some aspects such as the size of the fifo and spi bus clock
 * setup. Such differences are specified to the driver using this structure
 * which is provided as driver data to the driver.
 */
struct s3c64xx_spi_port_config {
	int	fifo_lvl_mask[MAX_SPI_PORTS];
	int	rx_lvl_offset;
	int	tx_st_done;
	int	quirks;
	bool	high_speed;
	bool	clk_from_cmu;
};

/**
 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
 * @clk: Pointer to the spi clock.
 * @src_clk: Pointer to the clock used to generate SPI signals.
 * @master: Pointer to the SPI Protocol master.
 * @cntrlr_info: Platform specific data for the controller this driver manages.
 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
 * @queue: To log SPI xfer requests.
 * @lock: Controller specific lock.
 * @state: Set of FLAGS (RXBUSY/TXBUSY) to indicate status.
 * @rx_dma: Controller's DMA channel info for Rx.
 * @tx_dma: Controller's DMA channel info for Tx.
 * @sfr_start: BUS address of SPI controller regs.
 * @regs: Pointer to ioremap'ed controller registers.
 * @irq: interrupt
 * @xfer_completion: To indicate completion of xfer task.
 * @cur_mode: Stores the active configuration of the controller.
 * @cur_bpw: Stores the active bits per word settings.
 * @cur_speed: Stores the active xfer clock speed.
 */
struct s3c64xx_spi_driver_data {
	void __iomem                    *regs;
	struct clk                      *clk;
	struct clk                      *src_clk;
	struct platform_device          *pdev;
	struct spi_master               *master;
	struct s3c64xx_spi_info         *cntrlr_info;
	struct spi_device               *tgl_spi;
	struct list_head                queue;
	spinlock_t                      lock;
	unsigned long                   sfr_start;
	struct completion               xfer_completion;
	unsigned                        state;
	unsigned                        cur_mode, cur_bpw;
	unsigned                        cur_speed;
	struct s3c64xx_spi_dma_data	rx_dma;
	struct s3c64xx_spi_dma_data	tx_dma;
#ifdef CONFIG_S3C_DMA
	struct samsung_dma_ops		*ops;	/* legacy S3C DMA operations */
#endif
	struct s3c64xx_spi_port_config	*port_conf;
	unsigned int			port_id;
	unsigned long			gpios[4];
	bool				cs_gpio;	/* CS driven by a GPIO line */
};

/*
 * flush_fifo - Soft-reset the SPI channel and drain both FIFOs.
 *
 * Disables the Rx/Tx channels, pulses SW_RST (also clearing HS_EN),
 * busy-waits until the Tx FIFO reports empty and the Rx FIFO has been
 * read dry, then releases the reset and clears the DMA mode bits.
 * Called with the controller idle, between transfers.
 */
static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	unsigned long loops;
	u32 val;

	/* Stop any pending packet-count driven clocking */
	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Assert software reset; HS_EN must be off during reset */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val |= S3C64XX_SPI_CH_SW_RST;
	val &= ~S3C64XX_SPI_CH_HS_EN;
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Flush TxFIFO*/
	loops = msecs_to_loops(1);
	do {
		val = readl(regs + S3C64XX_SPI_STATUS);
	} while (TX_FIFO_LVL(val, sdd) && loops--);

	/*
	 * NOTE(review): on timeout the post-decrement leaves 'loops' wrapped
	 * to ULONG_MAX, not 0, so this warning can never fire — confirm and
	 * consider pre-decrement or an explicit timed-out flag.
	 */
	if (loops == 0)
		dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");

	/* Flush RxFIFO*/
	loops = msecs_to_loops(1);
	do {
		val = readl(regs + S3C64XX_SPI_STATUS);
		if (RX_FIFO_LVL(val, sdd))
			readl(regs + S3C64XX_SPI_RX_DATA);	/* discard */
		else
			break;
	} while (loops--);

	/* NOTE(review): same wraparound issue as the TX flush above */
	if (loops == 0)
		dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");

	/* Release the software reset */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~S3C64XX_SPI_CH_SW_RST;
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);
}

/*
 * s3c64xx_spi_dmacb - DMA completion callback (both directions).
 * @data: the rx_dma or tx_dma member of the driver data.
 *
 * Recovers the driver data via container_of on the dma_data member,
 * clears the matching RXBUSY/TXBUSY flag under sdd->lock, and completes
 * xfer_completion once both directions are done.
 */
static void s3c64xx_spi_dmacb(void *data)
{
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_dma_data *dma = data;
	unsigned long flags;

	if (dma->direction == DMA_DEV_TO_MEM)
		sdd = container_of(data,
			struct s3c64xx_spi_driver_data, rx_dma);
	else
		sdd = container_of(data,
			struct s3c64xx_spi_driver_data, tx_dma);

	spin_lock_irqsave(&sdd->lock, flags);

	if (dma->direction == DMA_DEV_TO_MEM) {
		sdd->state &= ~RXBUSY;
		if (!(sdd->state & TXBUSY))
			complete(&sdd->xfer_completion);
	} else {
		sdd->state &= ~TXBUSY;
		if (!(sdd->state & RXBUSY))
			complete(&sdd->xfer_completion);
	}

	spin_unlock_irqrestore(&sdd->lock, flags);
}

#ifdef CONFIG_S3C_DMA
/* FIXME: remove this section once arch/arm/mach-s3c64xx uses dmaengine */

static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
	.name = "samsung-spi-dma",
};

/*
 * prepare_dma - Configure and start a transfer on the legacy S3C DMA API.
 * @dma: direction-specific channel data (rx_dma or tx_dma of sdd).
 * @len: transfer length in bytes.
 * @buf: bus (DMA) address of the data buffer.
 */
static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
					unsigned len, dma_addr_t buf)
{
	struct s3c64xx_spi_driver_data *sdd;
	struct samsung_dma_prep info;
	struct samsung_dma_config config;

	if (dma->direction == DMA_DEV_TO_MEM) {
		sdd = container_of((void *)dma,
			struct s3c64xx_spi_driver_data, rx_dma);
		config.direction = sdd->rx_dma.direction;
		config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
		config.width = sdd->cur_bpw / 8;
		sdd->ops->config((enum dma_ch)sdd->rx_dma.ch, &config);
	} else {
		sdd = container_of((void *)dma,
			struct s3c64xx_spi_driver_data, tx_dma);
		config.direction = sdd->tx_dma.direction;
		config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
		config.width = sdd->cur_bpw / 8;
		sdd->ops->config((enum dma_ch)sdd->tx_dma.ch, &config);
	}

	info.cap = DMA_SLAVE;
	info.len = len;
	info.fp = s3c64xx_spi_dmacb;
	info.fp_param = dma;
	info.direction = dma->direction;
	info.buf = buf;

	sdd->ops->prepare((enum dma_ch)dma->ch, &info);
	sdd->ops->trigger((enum dma_ch)dma->ch);
}

/*
 * acquire_dma - Request both legacy DMA channels.
 *
 * NOTE(review): the request() results are not checked; this always
 * returns 1 ("success") — confirm whether failures are possible here.
 */
static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
{
	struct samsung_dma_req req;
	struct device *dev = &sdd->pdev->dev;

	sdd->ops = samsung_dma_get_ops();

	req.cap = DMA_SLAVE;
	req.client = &s3c64xx_spi_dma_client;

	sdd->rx_dma.ch = (void *)sdd->ops->request(sdd->rx_dma.dmach, &req, dev, "rx");
	sdd->tx_dma.ch = (void *)sdd->ops->request(sdd->tx_dma.dmach, &req, dev, "tx");

	return 1;
}

/* spi_master prepare_transfer_hardware hook (legacy S3C DMA build) */
static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);

	/*
	 * If DMA resource was not available during
	 * probe, no need to continue with dma requests
	 * else Acquire DMA channels
	 */
	while (!is_polling(sdd) && !acquire_dma(sdd))
		usleep_range(10000, 11000);

	pm_runtime_get_sync(&sdd->pdev->dev);

	return 0;
}

/* spi_master unprepare_transfer_hardware hook (legacy S3C DMA build) */
static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);

	/* Free DMA channels */
	if (!is_polling(sdd)) {
		sdd->ops->release((enum dma_ch)sdd->rx_dma.ch,
						&s3c64xx_spi_dma_client);
		sdd->ops->release((enum dma_ch)sdd->tx_dma.ch,
						&s3c64xx_spi_dma_client);
	}
	pm_runtime_put(&sdd->pdev->dev);

	return 0;
}

/* Abort an in-flight transfer on one channel (legacy S3C DMA build) */
static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
				 struct s3c64xx_spi_dma_data *dma)
{
	sdd->ops->stop((enum dma_ch)dma->ch);
}
#else

/*
 * prepare_dma - Configure, submit and issue a transfer via dmaengine.
 * @dma: direction-specific channel data (rx_dma or tx_dma of sdd).
 * @len: transfer length in bytes.
 * @buf: bus (DMA) address of the data buffer.
 *
 * NOTE(review): 'config' is not zero-initialized, so fields of
 * dma_slave_config that are not set here reach the DMA driver with
 * stack garbage — confirm against the dmaengine provider in use.
 * NOTE(review): the dmaengine_prep_slave_sg() result is dereferenced
 * without a NULL check.
 */
static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
					unsigned len, dma_addr_t buf)
{
	struct s3c64xx_spi_driver_data *sdd;
	struct dma_slave_config config;
	struct scatterlist sg;
	struct dma_async_tx_descriptor *desc;

	if (dma->direction == DMA_DEV_TO_MEM) {
		sdd = container_of((void *)dma,
			struct s3c64xx_spi_driver_data, rx_dma);
		config.direction = dma->direction;
		config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
		config.src_addr_width = sdd->cur_bpw / 8;
		config.src_maxburst = 1;
		dmaengine_slave_config(dma->ch, &config);
	} else {
		sdd = container_of((void *)dma,
			struct s3c64xx_spi_driver_data, tx_dma);
		config.direction = dma->direction;
		config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
		config.dst_addr_width = sdd->cur_bpw / 8;
		config.dst_maxburst = 1;
		dmaengine_slave_config(dma->ch, &config);
	}

	/* Single-entry scatterlist describing the whole buffer */
	sg_init_table(&sg, 1);
	sg_dma_len(&sg) = len;
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf)),
		    len, offset_in_page(buf));
	sg_dma_address(&sg) = buf;

	desc = dmaengine_prep_slave_sg(dma->ch,
		&sg, 1, dma->direction, DMA_PREP_INTERRUPT);

	desc->callback = s3c64xx_spi_dmacb;
	desc->callback_param = dma;

	dmaengine_submit(desc);
	dma_async_issue_pending(dma->ch);
}

/*
 * s3c64xx_spi_prepare_transfer - spi_master prepare hook (dmaengine build).
 *
 * Acquires Rx then Tx slave channels (skipped entirely for poll-only
 * controllers) and takes a runtime-PM reference. On failure the
 * already-acquired resources are unwound via the goto chain.
 */
static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
	dma_filter_fn filter = sdd->cntrlr_info->filter;
	struct device *dev = &sdd->pdev->dev;
	dma_cap_mask_t mask;
	int ret;

	if (is_polling(sdd))
		return 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Acquire DMA channels */
	sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
				(void *)sdd->rx_dma.dmach, dev, "rx");
	if (!sdd->rx_dma.ch) {
		dev_err(dev, "Failed to get RX DMA channel\n");
		ret = -EBUSY;
		goto out;
	}

	sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
				(void *)sdd->tx_dma.dmach, dev, "tx");
	if (!sdd->tx_dma.ch) {
		dev_err(dev, "Failed to get TX DMA channel\n");
		ret = -EBUSY;
		goto out_rx;
	}

	ret = pm_runtime_get_sync(&sdd->pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable device: %d\n", ret);
		goto out_tx;
	}

	return 0;

out_tx:
	dma_release_channel(sdd->tx_dma.ch);
out_rx:
	dma_release_channel(sdd->rx_dma.ch);
out:
	return ret;
}

/* spi_master unprepare hook (dmaengine build): drop channels and PM ref */
static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);

	/* Free DMA channels */
	if (!is_polling(sdd)) {
		dma_release_channel(sdd->rx_dma.ch);
		dma_release_channel(sdd->tx_dma.ch);
	}

	pm_runtime_put(&sdd->pdev->dev);
	return 0;
}

/* Abort an in-flight transfer on one channel (dmaengine build) */
static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
				 struct s3c64xx_spi_dma_data *dma)
{
	dmaengine_terminate_all(dma->ch);
}
#endif

/*
 * enable_datapath - Program MODE_CFG/CH_CFG (and kick DMA) for one xfer.
 * @sdd: driver data.
 * @spi: target device (unused here beyond the transfer context).
 * @xfer: the transfer to run.
 * @dma_mode: non-zero to use DMA, zero for PIO.
 *
 * For PIO Tx the data is pushed into the FIFO here; for DMA the
 * channels are prepared and the packet count register is armed.
 */
static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
				struct spi_device *spi,
				struct spi_transfer *xfer, int dma_mode)
{
	void __iomem *regs = sdd->regs;
	u32 modecfg, chcfg;

	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
	modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);

	chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
	chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;

	if (dma_mode) {
		chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
	} else {
		/* Always shift in data in FIFO, even if xfer is Tx only,
		 * this helps setting PCKT_CNT value for generating clocks
		 * as exactly needed.
		 */
		chcfg |= S3C64XX_SPI_CH_RXCH_ON;
		/* Packet count = number of words on the wire */
		writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
	}

	if (xfer->tx_buf != NULL) {
		sdd->state |= TXBUSY;
		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
			prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
		} else {
			/* PIO: push the whole buffer into the Tx FIFO now */
			switch (sdd->cur_bpw) {
			case 32:
				iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 4);
				break;
			case 16:
				iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 2);
				break;
			default:
				iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len);
				break;
			}
		}
	}

	if (xfer->rx_buf != NULL) {
		sdd->state |= RXBUSY;

		/* High-speed receive mode only valid for CPHA=0 at >= 30MHz */
		if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
					&& !(sdd->cur_mode & SPI_CPHA))
			chcfg |= S3C64XX_SPI_CH_HS_EN;

		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
			chcfg |= S3C64XX_SPI_CH_RXCH_ON;
			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
			prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
		}
	}

	writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
}

/*
 * enable_cs - Assert chip-select for @spi, deselecting any device left
 * selected by a previous cs_change hint (sdd->tgl_spi).
 *
 * NOTE(review): the deselect of the *previous* device uses the current
 * spi->mode polarity, not the previous device's mode — confirm this is
 * intended when devices with different CS polarity share the bus.
 */
static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
						struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs;

	if (sdd->tgl_spi != NULL) {	/* If last device toggled after mssg */
		if (sdd->tgl_spi != spi) {	/* if last mssg on diff device */
			/* Deselect the last toggled device */
			cs = sdd->tgl_spi->controller_data;
			if (sdd->cs_gpio)
				gpio_set_value(cs->line,
					spi->mode & SPI_CS_HIGH ? 0 : 1);
		}
		sdd->tgl_spi = NULL;
	}

	cs = spi->controller_data;
	if (sdd->cs_gpio)
		gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);

	/* Start the signals */
	writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
}

/*
 * s3c64xx_spi_wait_for_timeout - Poll until the Rx FIFO is full or a
 * timeout elapses.
 * @sdd: driver data.
 * @timeout_ms: polling budget in milliseconds; 0 means a single check.
 *
 * Returns the Rx FIFO fill level actually observed (in words), which
 * the caller then drains.
 */
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
					int timeout_ms)
{
	void __iomem *regs = sdd->regs;
	unsigned long val = 1;
	u32 status;

	/* max fifo depth available */
	u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;

	if (timeout_ms)
		val = msecs_to_loops(timeout_ms);

	do {
		status = readl(regs + S3C64XX_SPI_STATUS);
	} while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);

	/* return the actual received data length */
	return RX_FIFO_LVL(status, sdd);
}

/*
 * wait_for_xfer - Wait for one transfer to finish.
 * @sdd: driver data.
 * @xfer: the transfer being waited on.
 * @dma_mode: non-zero if the transfer runs via DMA.
 *
 * DMA mode waits on xfer_completion (signalled by s3c64xx_spi_dmacb);
 * PIO mode polls the FIFO level and drains the Rx FIFO chunk-wise.
 * Returns 0 on success or -EIO on timeout.
 */
static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
				struct spi_transfer *xfer, int dma_mode)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 10; /* some tolerance */

	if (dma_mode) {
		val = msecs_to_jiffies(ms) + 10;
		val = wait_for_completion_timeout(&sdd->xfer_completion, val);
	} else {
		u32 status;
		val = msecs_to_loops(ms);
		do {
			status = readl(regs + S3C64XX_SPI_STATUS);
		} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
	}

	if (dma_mode) {
		u32 status;

		/*
		 * If the previous xfer was completed within timeout, then
		 * proceed further else return -EIO.
		 * DmaTx returns after simply writing data in the FIFO,
		 * w/o waiting for real transmission on the bus to finish.
		 * DmaRx returns only after Dma read data from FIFO which
		 * needs bus transmission to finish, so we don't worry if
		 * Xfer involved Rx(with or without Tx).
		 */
		if (val && !xfer->rx_buf) {
			val = msecs_to_loops(10);
			status = readl(regs + S3C64XX_SPI_STATUS);
			while ((TX_FIFO_LVL(status, sdd)
				|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
			       && --val) {
				cpu_relax();
				status = readl(regs + S3C64XX_SPI_STATUS);
			}

		}

		/* If timed out while checking rx/tx status return error */
		if (!val)
			return -EIO;
	} else {
		int loops;
		u32 cpy_len;
		u8 *buf;

		/* If it was only Tx */
		if (!xfer->rx_buf) {
			sdd->state &= ~TXBUSY;
			return 0;
		}

		/*
		 * If the receive length is bigger than the controller fifo
		 * size, calculate the loops and read the fifo as many times.
		 * loops = length / max fifo size (calculated by using the
		 * fifo mask).
		 * For any size less than the fifo size the below code is
		 * executed atleast once.
		 */
		loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
		buf = xfer->rx_buf;
		do {
			/* wait for data to be received in the fifo */
			cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
						(loops ? ms : 0));

			switch (sdd->cur_bpw) {
			case 32:
				ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
					buf, cpy_len / 4);
				break;
			case 16:
				ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
					buf, cpy_len / 2);
				break;
			default:
				ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
					buf, cpy_len);
				break;
			}

			buf = buf + cpy_len;
		} while (loops--);
		sdd->state &= ~RXBUSY;
	}

	return 0;
}

/*
 * disable_cs - Deassert chip-select for @spi and quiesce the bus.
 * Also forgets a stale tgl_spi hint if it points at this device.
 */
static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
						struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;

	if (sdd->tgl_spi == spi)
		sdd->tgl_spi = NULL;

	if (sdd->cs_gpio)
		gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ?
			0 : 1);

	/* Quiese the signals */
	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
}

/*
 * s3c64xx_spi_config - Push cur_mode/cur_bpw/cur_speed into the hardware.
 *
 * Sequence: gate the SPI clock, program polarity/phase and the channel
 * and bus transfer sizes, then set the rate and re-enable the clock.
 * On clk_from_cmu parts the rate comes from the external clock (with a
 * half-divider, hence the *2); otherwise the internal prescaler is used.
 */
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	u32 val;

	/* Disable Clock */
	if (sdd->port_conf->clk_from_cmu) {
		clk_disable_unprepare(sdd->src_clk);
	} else {
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	/* Set Polarity and Phase */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_SLAVE |
			S3C64XX_SPI_CPOL_L |
			S3C64XX_SPI_CPHA_B);

	if (sdd->cur_mode & SPI_CPOL)
		val |= S3C64XX_SPI_CPOL_L;

	if (sdd->cur_mode & SPI_CPHA)
		val |= S3C64XX_SPI_CPHA_B;

	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Set Channel & DMA Mode */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
			| S3C64XX_SPI_MODE_CH_TSZ_MASK);

	switch (sdd->cur_bpw) {
	case 32:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
		break;
	case 16:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
		break;
	default:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
		val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
		break;
	}

	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	if (sdd->port_conf->clk_from_cmu) {
		/* Configure Clock */
		/* There is half-multiplier before the SPI */
		clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
		/* Enable Clock */
		clk_prepare_enable(sdd->src_clk);
	} else {
		/* Configure Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_PSR_MASK;
		val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
				& S3C64XX_SPI_PSR_MASK);
		writel(val, regs + S3C64XX_SPI_CLK_CFG);

		/* Enable Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val |= S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}
}

/* Sentinel for "this transfer was not DMA-mapped by us" */
#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)

/*
 * s3c64xx_spi_map_mssg - DMA-map every transfer in @msg that is large
 * enough to go via DMA (bigger than half the FIFO depth).
 *
 * Skipped entirely for poll-only controllers or pre-mapped messages.
 * Returns 0 on success or -ENOMEM on the first mapping failure
 * (already-mapped transfers are cleaned up by unmap_mssg later).
 */
static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
						struct spi_message *msg)
{
	struct device *dev = &sdd->pdev->dev;
	struct spi_transfer *xfer;

	if (is_polling(sdd) || msg->is_dma_mapped)
		return 0;

	/* First mark all xfer unmapped */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		xfer->rx_dma = XFER_DMAADDR_INVALID;
		xfer->tx_dma = XFER_DMAADDR_INVALID;
	}

	/* Map until end or first fail */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {

		if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
			continue;

		if (xfer->tx_buf != NULL) {
			xfer->tx_dma = dma_map_single(dev,
					(void *)xfer->tx_buf, xfer->len,
					DMA_TO_DEVICE);
			if (dma_mapping_error(dev, xfer->tx_dma)) {
				dev_err(dev, "dma_map_single Tx failed\n");
				xfer->tx_dma = XFER_DMAADDR_INVALID;
				return -ENOMEM;
			}
		}

		if (xfer->rx_buf != NULL) {
			xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
						xfer->len, DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, xfer->rx_dma)) {
				dev_err(dev, "dma_map_single Rx failed\n");
				/*
				 * NOTE(review): this unmaps tx_dma even for an
				 * Rx-only transfer where tx_dma still holds the
				 * XFER_DMAADDR_INVALID sentinel — confirm.
				 */
				dma_unmap_single(dev, xfer->tx_dma,
						xfer->len, DMA_TO_DEVICE);
				xfer->tx_dma = XFER_DMAADDR_INVALID;
				xfer->rx_dma = XFER_DMAADDR_INVALID;
				return -ENOMEM;
			}
		}
	}

	return 0;
}

/*
 * s3c64xx_spi_unmap_mssg - Undo s3c64xx_spi_map_mssg for every transfer
 * that was actually mapped (sentinel check guards partial maps).
 */
static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
						struct spi_message *msg)
{
	struct device *dev = &sdd->pdev->dev;
	struct spi_transfer *xfer;

	if (is_polling(sdd) || msg->is_dma_mapped)
		return;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {

		if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
			continue;

		if (xfer->rx_buf != NULL
				&& xfer->rx_dma != XFER_DMAADDR_INVALID)
			dma_unmap_single(dev, xfer->rx_dma,
					xfer->len, DMA_FROM_DEVICE);

		if (xfer->tx_buf != NULL
				&& xfer->tx_dma != XFER_DMAADDR_INVALID)
			dma_unmap_single(dev, xfer->tx_dma,
					xfer->len, DMA_TO_DEVICE);
	}
}

/*
 * s3c64xx_spi_transfer_one_message - spi_master transfer_one_message hook.
 *
 * Reconfigures the controller when the device's settings differ from the
 * cached ones, DMA-maps the message, then runs each transfer (DMA when it
 * exceeds half the FIFO depth, PIO otherwise), honouring delay_usecs and
 * the cs_change keep-selected hint. Always finalizes the message; errors
 * are reported through msg->status.
 */
static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
					    struct spi_message *msg)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
	struct spi_transfer *xfer;
	int status = 0, cs_toggle = 0;
	u32 speed;
	u8 bpw;

	/* If Master's(controller) state differs from that needed by Slave */
	if (sdd->cur_speed != spi->max_speed_hz
			|| sdd->cur_mode != spi->mode
			|| sdd->cur_bpw != spi->bits_per_word) {
		sdd->cur_bpw = spi->bits_per_word;
		sdd->cur_speed = spi->max_speed_hz;
		sdd->cur_mode = spi->mode;
		s3c64xx_spi_config(sdd);
	}

	/* Map all the transfers if needed */
	if (s3c64xx_spi_map_mssg(sdd, msg)) {
		dev_err(&spi->dev,
			"Xfer: Unable to map message buffers!\n");
		status = -ENOMEM;
		goto out;
	}

	/* Configure feedback delay */
	writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {

		unsigned long flags;
		int use_dma;

		INIT_COMPLETION(sdd->xfer_completion);

		/* Only BPW and Speed may change across transfers */
		bpw = xfer->bits_per_word;
		speed = xfer->speed_hz ?
			: spi->max_speed_hz;	/* GNU ?: — fall back to device max */

		if (xfer->len % (bpw / 8)) {
			dev_err(&spi->dev,
				"Xfer length(%u) not a multiple of word size(%u)\n",
				xfer->len, bpw / 8);
			status = -EIO;
			goto out;
		}

		if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
			sdd->cur_bpw = bpw;
			sdd->cur_speed = speed;
			s3c64xx_spi_config(sdd);
		}

		/* Polling method for xfers not bigger than FIFO capacity */
		use_dma = 0;
		if (!is_polling(sdd) &&
		    (sdd->rx_dma.ch && sdd->tx_dma.ch &&
		     (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
			use_dma = 1;

		spin_lock_irqsave(&sdd->lock, flags);

		/* Pending only which is to be done */
		sdd->state &= ~RXBUSY;
		sdd->state &= ~TXBUSY;

		enable_datapath(sdd, spi, xfer, use_dma);

		/* Slave Select */
		enable_cs(sdd, spi);

		spin_unlock_irqrestore(&sdd->lock, flags);

		status = wait_for_xfer(sdd, xfer, use_dma);

		if (status) {
			/* 'f' = direction failed/busy, 'p' = passed/idle */
			dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
				xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
				(sdd->state & RXBUSY) ? 'f' : 'p',
				(sdd->state & TXBUSY) ? 'f' : 'p',
				xfer->len);

			if (use_dma) {
				if (xfer->tx_buf != NULL
						&& (sdd->state & TXBUSY))
					s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma);
				if (xfer->rx_buf != NULL
						&& (sdd->state & RXBUSY))
					s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma);
			}

			goto out;
		}

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			/* Hint that the next mssg is gonna be
			   for the same device */
			if (list_is_last(&xfer->transfer_list,
						&msg->transfers))
				cs_toggle = 1;
		}

		msg->actual_length += xfer->len;

		flush_fifo(sdd);
	}

out:
	/* Keep CS asserted only on a clean cs_change hint */
	if (!cs_toggle || status)
		disable_cs(sdd, spi);
	else
		sdd->tgl_spi = spi;

	s3c64xx_spi_unmap_mssg(sdd, msg);

	msg->status = status;

	spi_finalize_current_message(master);

	return 0;
}

/*
 * s3c64xx_get_slave_ctrldata - Build a csinfo struct from the device
 * tree "controller-data" child node of the slave.
 *
 * Reads the optional CS gpio (only when the controller drives CS via
 * gpio) and the feedback delay. Returns a kzalloc'ed csinfo (caller
 * owns it) or an ERR_PTR on missing nodes / invalid gpio / OOM.
 */
static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
				struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs;
	struct device_node *slave_np, *data_np = NULL;
	struct s3c64xx_spi_driver_data *sdd;
	u32 fb_delay = 0;

	sdd = spi_master_get_devdata(spi->master);
	slave_np = spi->dev.of_node;
	if (!slave_np) {
		dev_err(&spi->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	data_np = of_get_child_by_name(slave_np, "controller-data");
	if (!data_np) {
		dev_err(&spi->dev, "child node 'controller-data' not found\n");
		return ERR_PTR(-EINVAL);
	}

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs) {
		dev_err(&spi->dev, "could not allocate memory for controller data\n");
		of_node_put(data_np);
		return ERR_PTR(-ENOMEM);
	}

	/* The CS line is asserted/deasserted by the gpio pin */
	if (sdd->cs_gpio)
		cs->line = of_get_named_gpio(data_np, "cs-gpio", 0);

	/* NOTE(review): when !cs_gpio, cs->line stays 0, which
	 * gpio_is_valid() accepts — the check below is effectively
	 * skipped in that case; confirm intended. */
	if (!gpio_is_valid(cs->line)) {
		dev_err(&spi->dev, "chip select gpio is not specified or invalid\n");
		kfree(cs);
		of_node_put(data_np);
		return ERR_PTR(-EINVAL);
	}

	of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
	cs->fb_delay = fb_delay;
	of_node_put(data_np);
	return cs;
}

/*
 * Here we only check the validity of requested configuration
 * and save the configuration in a local data-structure.
 * The controller is actually configured only just before we
 * get a message to transfer.
 */
static int s3c64xx_spi_setup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_info *sci;
	struct spi_message *msg;
	unsigned long flags;
	int err;

	sdd = spi_master_get_devdata(spi->master);
	if (!cs && spi->dev.of_node) {
		/* DT path: build csinfo from the controller-data node */
		cs = s3c64xx_get_slave_ctrldata(spi);
		spi->controller_data = cs;
	}

	if (IS_ERR_OR_NULL(cs)) {
		dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
		return -ENODEV;
	}

	/* Request gpio only if cs line is asserted by gpio pins */
	if (sdd->cs_gpio) {
		err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH,
				       dev_name(&spi->dev));
		if (err) {
			dev_err(&spi->dev,
				"Failed to get /CS gpio [%d]: %d\n",
				cs->line, err);
			goto err_gpio_req;
		}
	}

	if (!spi_get_ctldata(spi))
		spi_set_ctldata(spi, cs);

	sci = sdd->cntrlr_info;

	spin_lock_irqsave(&sdd->lock, flags);

	list_for_each_entry(msg, &sdd->queue, queue) {
		/* Is some mssg is already queued for this device */
		if (msg->spi == spi) {
			dev_err(&spi->dev,
				"setup: attempt while mssg in queue!\n");
			spin_unlock_irqrestore(&sdd->lock, flags);
			err = -EBUSY;
			goto err_msgq;
		}
	}

	spin_unlock_irqrestore(&sdd->lock, flags);

	pm_runtime_get_sync(&sdd->pdev->dev);

	/* Check if we can provide the requested rate */
	if (!sdd->port_conf->clk_from_cmu) {
		u32 psr, speed;

		/* Max possible */
		speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);

		if (spi->max_speed_hz > speed)
			spi->max_speed_hz = speed;

		/* Nearest prescaler not exceeding the requested rate */
		psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
		psr &= S3C64XX_SPI_PSR_MASK;
		if (psr == S3C64XX_SPI_PSR_MASK)
			psr--;

		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz < speed) {
			if (psr+1 < S3C64XX_SPI_PSR_MASK) {
				psr++;
			} else {
				err = -EINVAL;
				goto setup_exit;
			}
		}

		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz >= speed) {
			spi->max_speed_hz = speed;
		} else {
			dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
				spi->max_speed_hz);
			err = -EINVAL;
			goto setup_exit;
		}
	}

	pm_runtime_put(&sdd->pdev->dev);
	disable_cs(sdd, spi);
	return 0;

setup_exit:
	/* setup() returns with device de-selected */
	/* NOTE(review): this error path skips pm_runtime_put() after the
	 * pm_runtime_get_sync() above — runtime-PM refcount imbalance;
	 * also gpio_free() below runs even when !sdd->cs_gpio. Confirm. */
	disable_cs(sdd, spi);

err_msgq:
	gpio_free(cs->line);
	spi_set_ctldata(spi, NULL);

err_gpio_req:
	if (spi->dev.of_node)
		kfree(cs);

	return err;
}

/*
 * s3c64xx_spi_cleanup - spi_master cleanup hook: release the CS gpio
 * and, for DT-created csinfo, the allocation itself.
 */
static void s3c64xx_spi_cleanup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
	struct s3c64xx_spi_driver_data *sdd;

	sdd = spi_master_get_devdata(spi->master);
	if (cs && sdd->cs_gpio) {
		gpio_free(cs->line);
		if (spi->dev.of_node)
			kfree(cs);
	}
	spi_set_ctldata(spi, NULL);
}

/* Interrupt handler: reports FIFO over/underrun errors (truncated in view) */
static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
{
	struct s3c64xx_spi_driver_data *sdd = data;
	struct spi_master *spi = sdd->master;
	unsigned int val, clr = 0;

	val = readl(sdd->regs + S3C64XX_SPI_STATUS);

	if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
		clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
		dev_err(&spi->dev, "RX
overrun\n"); 1188 } 1189 if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) { 1190 clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR; 1191 dev_err(&spi->dev, "RX underrun\n"); 1192 } 1193 if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) { 1194 clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR; 1195 dev_err(&spi->dev, "TX overrun\n"); 1196 } 1197 if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) { 1198 clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR; 1199 dev_err(&spi->dev, "TX underrun\n"); 1200 } 1201 1202 /* Clear the pending irq by setting and then clearing it */ 1203 writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR); 1204 writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR); 1205 1206 return IRQ_HANDLED; 1207 } 1208 1209 static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) 1210 { 1211 struct s3c64xx_spi_info *sci = sdd->cntrlr_info; 1212 void __iomem *regs = sdd->regs; 1213 unsigned int val; 1214 1215 sdd->cur_speed = 0; 1216 1217 writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL); 1218 1219 /* Disable Interrupts - we use Polling if not DMA mode */ 1220 writel(0, regs + S3C64XX_SPI_INT_EN); 1221 1222 if (!sdd->port_conf->clk_from_cmu) 1223 writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT, 1224 regs + S3C64XX_SPI_CLK_CFG); 1225 writel(0, regs + S3C64XX_SPI_MODE_CFG); 1226 writel(0, regs + S3C64XX_SPI_PACKET_CNT); 1227 1228 /* Clear any irq pending bits, should set and clear the bits */ 1229 val = S3C64XX_SPI_PND_RX_OVERRUN_CLR | 1230 S3C64XX_SPI_PND_RX_UNDERRUN_CLR | 1231 S3C64XX_SPI_PND_TX_OVERRUN_CLR | 1232 S3C64XX_SPI_PND_TX_UNDERRUN_CLR; 1233 writel(val, regs + S3C64XX_SPI_PENDING_CLR); 1234 writel(0, regs + S3C64XX_SPI_PENDING_CLR); 1235 1236 writel(0, regs + S3C64XX_SPI_SWAP_CFG); 1237 1238 val = readl(regs + S3C64XX_SPI_MODE_CFG); 1239 val &= ~S3C64XX_SPI_MODE_4BURST; 1240 val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF); 1241 val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF); 1242 writel(val, regs + S3C64XX_SPI_MODE_CFG); 1243 1244 
flush_fifo(sdd); 1245 } 1246 1247 #ifdef CONFIG_OF 1248 static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev) 1249 { 1250 struct s3c64xx_spi_info *sci; 1251 u32 temp; 1252 1253 sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL); 1254 if (!sci) { 1255 dev_err(dev, "memory allocation for spi_info failed\n"); 1256 return ERR_PTR(-ENOMEM); 1257 } 1258 1259 if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) { 1260 dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n"); 1261 sci->src_clk_nr = 0; 1262 } else { 1263 sci->src_clk_nr = temp; 1264 } 1265 1266 if (of_property_read_u32(dev->of_node, "num-cs", &temp)) { 1267 dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n"); 1268 sci->num_cs = 1; 1269 } else { 1270 sci->num_cs = temp; 1271 } 1272 1273 return sci; 1274 } 1275 #else 1276 static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev) 1277 { 1278 return dev->platform_data; 1279 } 1280 #endif 1281 1282 static const struct of_device_id s3c64xx_spi_dt_match[]; 1283 1284 static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config( 1285 struct platform_device *pdev) 1286 { 1287 #ifdef CONFIG_OF 1288 if (pdev->dev.of_node) { 1289 const struct of_device_id *match; 1290 match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node); 1291 return (struct s3c64xx_spi_port_config *)match->data; 1292 } 1293 #endif 1294 return (struct s3c64xx_spi_port_config *) 1295 platform_get_device_id(pdev)->driver_data; 1296 } 1297 1298 static int s3c64xx_spi_probe(struct platform_device *pdev) 1299 { 1300 struct resource *mem_res; 1301 struct resource *res; 1302 struct s3c64xx_spi_driver_data *sdd; 1303 struct s3c64xx_spi_info *sci = pdev->dev.platform_data; 1304 struct spi_master *master; 1305 int ret, irq; 1306 char clk_name[16]; 1307 1308 if (!sci && pdev->dev.of_node) { 1309 sci = s3c64xx_spi_parse_dt(&pdev->dev); 1310 if (IS_ERR(sci)) 1311 return 
PTR_ERR(sci); 1312 } 1313 1314 if (!sci) { 1315 dev_err(&pdev->dev, "platform_data missing!\n"); 1316 return -ENODEV; 1317 } 1318 1319 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1320 if (mem_res == NULL) { 1321 dev_err(&pdev->dev, "Unable to get SPI MEM resource\n"); 1322 return -ENXIO; 1323 } 1324 1325 irq = platform_get_irq(pdev, 0); 1326 if (irq < 0) { 1327 dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq); 1328 return irq; 1329 } 1330 1331 master = spi_alloc_master(&pdev->dev, 1332 sizeof(struct s3c64xx_spi_driver_data)); 1333 if (master == NULL) { 1334 dev_err(&pdev->dev, "Unable to allocate SPI Master\n"); 1335 return -ENOMEM; 1336 } 1337 1338 platform_set_drvdata(pdev, master); 1339 1340 sdd = spi_master_get_devdata(master); 1341 sdd->port_conf = s3c64xx_spi_get_port_config(pdev); 1342 sdd->master = master; 1343 sdd->cntrlr_info = sci; 1344 sdd->pdev = pdev; 1345 sdd->sfr_start = mem_res->start; 1346 sdd->cs_gpio = true; 1347 if (pdev->dev.of_node) { 1348 if (!of_find_property(pdev->dev.of_node, "cs-gpio", NULL)) 1349 sdd->cs_gpio = false; 1350 1351 ret = of_alias_get_id(pdev->dev.of_node, "spi"); 1352 if (ret < 0) { 1353 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", 1354 ret); 1355 goto err0; 1356 } 1357 sdd->port_id = ret; 1358 } else { 1359 sdd->port_id = pdev->id; 1360 } 1361 1362 sdd->cur_bpw = 8; 1363 1364 if (!sdd->pdev->dev.of_node) { 1365 res = platform_get_resource(pdev, IORESOURCE_DMA, 0); 1366 if (!res) { 1367 dev_warn(&pdev->dev, "Unable to get SPI tx dma " 1368 "resource. Switching to poll mode\n"); 1369 sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL; 1370 } else 1371 sdd->tx_dma.dmach = res->start; 1372 1373 res = platform_get_resource(pdev, IORESOURCE_DMA, 1); 1374 if (!res) { 1375 dev_warn(&pdev->dev, "Unable to get SPI rx dma " 1376 "resource. 
Switching to poll mode\n"); 1377 sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL; 1378 } else 1379 sdd->rx_dma.dmach = res->start; 1380 } 1381 1382 sdd->tx_dma.direction = DMA_MEM_TO_DEV; 1383 sdd->rx_dma.direction = DMA_DEV_TO_MEM; 1384 1385 master->dev.of_node = pdev->dev.of_node; 1386 master->bus_num = sdd->port_id; 1387 master->setup = s3c64xx_spi_setup; 1388 master->cleanup = s3c64xx_spi_cleanup; 1389 master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer; 1390 master->transfer_one_message = s3c64xx_spi_transfer_one_message; 1391 master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer; 1392 master->num_chipselect = sci->num_cs; 1393 master->dma_alignment = 8; 1394 master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | 1395 SPI_BPW_MASK(8); 1396 /* the spi->mode bits understood by this driver: */ 1397 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 1398 1399 sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res); 1400 if (IS_ERR(sdd->regs)) { 1401 ret = PTR_ERR(sdd->regs); 1402 goto err0; 1403 } 1404 1405 if (sci->cfg_gpio && sci->cfg_gpio()) { 1406 dev_err(&pdev->dev, "Unable to config gpio\n"); 1407 ret = -EBUSY; 1408 goto err0; 1409 } 1410 1411 /* Setup clocks */ 1412 sdd->clk = devm_clk_get(&pdev->dev, "spi"); 1413 if (IS_ERR(sdd->clk)) { 1414 dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n"); 1415 ret = PTR_ERR(sdd->clk); 1416 goto err0; 1417 } 1418 1419 if (clk_prepare_enable(sdd->clk)) { 1420 dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n"); 1421 ret = -EBUSY; 1422 goto err0; 1423 } 1424 1425 sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr); 1426 sdd->src_clk = devm_clk_get(&pdev->dev, clk_name); 1427 if (IS_ERR(sdd->src_clk)) { 1428 dev_err(&pdev->dev, 1429 "Unable to acquire clock '%s'\n", clk_name); 1430 ret = PTR_ERR(sdd->src_clk); 1431 goto err2; 1432 } 1433 1434 if (clk_prepare_enable(sdd->src_clk)) { 1435 dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name); 1436 ret = -EBUSY; 1437 goto 
err2; 1438 } 1439 1440 /* Setup Deufult Mode */ 1441 s3c64xx_spi_hwinit(sdd, sdd->port_id); 1442 1443 spin_lock_init(&sdd->lock); 1444 init_completion(&sdd->xfer_completion); 1445 INIT_LIST_HEAD(&sdd->queue); 1446 1447 ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0, 1448 "spi-s3c64xx", sdd); 1449 if (ret != 0) { 1450 dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n", 1451 irq, ret); 1452 goto err3; 1453 } 1454 1455 writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN | 1456 S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN, 1457 sdd->regs + S3C64XX_SPI_INT_EN); 1458 1459 if (spi_register_master(master)) { 1460 dev_err(&pdev->dev, "cannot register SPI master\n"); 1461 ret = -EBUSY; 1462 goto err3; 1463 } 1464 1465 dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n", 1466 sdd->port_id, master->num_chipselect); 1467 dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n", 1468 mem_res->end, mem_res->start, 1469 sdd->rx_dma.dmach, sdd->tx_dma.dmach); 1470 1471 pm_runtime_enable(&pdev->dev); 1472 1473 return 0; 1474 1475 err3: 1476 clk_disable_unprepare(sdd->src_clk); 1477 err2: 1478 clk_disable_unprepare(sdd->clk); 1479 err0: 1480 spi_master_put(master); 1481 1482 return ret; 1483 } 1484 1485 static int s3c64xx_spi_remove(struct platform_device *pdev) 1486 { 1487 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); 1488 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 1489 1490 pm_runtime_disable(&pdev->dev); 1491 1492 spi_unregister_master(master); 1493 1494 writel(0, sdd->regs + S3C64XX_SPI_INT_EN); 1495 1496 clk_disable_unprepare(sdd->src_clk); 1497 1498 clk_disable_unprepare(sdd->clk); 1499 1500 spi_master_put(master); 1501 1502 return 0; 1503 } 1504 1505 #ifdef CONFIG_PM_SLEEP 1506 static int s3c64xx_spi_suspend(struct device *dev) 1507 { 1508 struct spi_master *master = dev_get_drvdata(dev); 1509 struct s3c64xx_spi_driver_data *sdd 
= spi_master_get_devdata(master); 1510 1511 spi_master_suspend(master); 1512 1513 /* Disable the clock */ 1514 clk_disable_unprepare(sdd->src_clk); 1515 clk_disable_unprepare(sdd->clk); 1516 1517 sdd->cur_speed = 0; /* Output Clock is stopped */ 1518 1519 return 0; 1520 } 1521 1522 static int s3c64xx_spi_resume(struct device *dev) 1523 { 1524 struct spi_master *master = dev_get_drvdata(dev); 1525 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 1526 struct s3c64xx_spi_info *sci = sdd->cntrlr_info; 1527 1528 if (sci->cfg_gpio) 1529 sci->cfg_gpio(); 1530 1531 /* Enable the clock */ 1532 clk_prepare_enable(sdd->src_clk); 1533 clk_prepare_enable(sdd->clk); 1534 1535 s3c64xx_spi_hwinit(sdd, sdd->port_id); 1536 1537 spi_master_resume(master); 1538 1539 return 0; 1540 } 1541 #endif /* CONFIG_PM_SLEEP */ 1542 1543 #ifdef CONFIG_PM_RUNTIME 1544 static int s3c64xx_spi_runtime_suspend(struct device *dev) 1545 { 1546 struct spi_master *master = dev_get_drvdata(dev); 1547 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 1548 1549 clk_disable_unprepare(sdd->clk); 1550 clk_disable_unprepare(sdd->src_clk); 1551 1552 return 0; 1553 } 1554 1555 static int s3c64xx_spi_runtime_resume(struct device *dev) 1556 { 1557 struct spi_master *master = dev_get_drvdata(dev); 1558 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 1559 1560 clk_prepare_enable(sdd->src_clk); 1561 clk_prepare_enable(sdd->clk); 1562 1563 return 0; 1564 } 1565 #endif /* CONFIG_PM_RUNTIME */ 1566 1567 static const struct dev_pm_ops s3c64xx_spi_pm = { 1568 SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume) 1569 SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend, 1570 s3c64xx_spi_runtime_resume, NULL) 1571 }; 1572 1573 static struct s3c64xx_spi_port_config s3c2443_spi_port_config = { 1574 .fifo_lvl_mask = { 0x7f }, 1575 .rx_lvl_offset = 13, 1576 .tx_st_done = 21, 1577 .high_speed = true, 1578 }; 1579 1580 static struct s3c64xx_spi_port_config 
s3c6410_spi_port_config = { 1581 .fifo_lvl_mask = { 0x7f, 0x7F }, 1582 .rx_lvl_offset = 13, 1583 .tx_st_done = 21, 1584 }; 1585 1586 static struct s3c64xx_spi_port_config s5p64x0_spi_port_config = { 1587 .fifo_lvl_mask = { 0x1ff, 0x7F }, 1588 .rx_lvl_offset = 15, 1589 .tx_st_done = 25, 1590 }; 1591 1592 static struct s3c64xx_spi_port_config s5pc100_spi_port_config = { 1593 .fifo_lvl_mask = { 0x7f, 0x7F }, 1594 .rx_lvl_offset = 13, 1595 .tx_st_done = 21, 1596 .high_speed = true, 1597 }; 1598 1599 static struct s3c64xx_spi_port_config s5pv210_spi_port_config = { 1600 .fifo_lvl_mask = { 0x1ff, 0x7F }, 1601 .rx_lvl_offset = 15, 1602 .tx_st_done = 25, 1603 .high_speed = true, 1604 }; 1605 1606 static struct s3c64xx_spi_port_config exynos4_spi_port_config = { 1607 .fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F }, 1608 .rx_lvl_offset = 15, 1609 .tx_st_done = 25, 1610 .high_speed = true, 1611 .clk_from_cmu = true, 1612 }; 1613 1614 static struct s3c64xx_spi_port_config exynos5440_spi_port_config = { 1615 .fifo_lvl_mask = { 0x1ff }, 1616 .rx_lvl_offset = 15, 1617 .tx_st_done = 25, 1618 .high_speed = true, 1619 .clk_from_cmu = true, 1620 .quirks = S3C64XX_SPI_QUIRK_POLL, 1621 }; 1622 1623 static struct platform_device_id s3c64xx_spi_driver_ids[] = { 1624 { 1625 .name = "s3c2443-spi", 1626 .driver_data = (kernel_ulong_t)&s3c2443_spi_port_config, 1627 }, { 1628 .name = "s3c6410-spi", 1629 .driver_data = (kernel_ulong_t)&s3c6410_spi_port_config, 1630 }, { 1631 .name = "s5p64x0-spi", 1632 .driver_data = (kernel_ulong_t)&s5p64x0_spi_port_config, 1633 }, { 1634 .name = "s5pc100-spi", 1635 .driver_data = (kernel_ulong_t)&s5pc100_spi_port_config, 1636 }, { 1637 .name = "s5pv210-spi", 1638 .driver_data = (kernel_ulong_t)&s5pv210_spi_port_config, 1639 }, { 1640 .name = "exynos4210-spi", 1641 .driver_data = (kernel_ulong_t)&exynos4_spi_port_config, 1642 }, 1643 { }, 1644 }; 1645 1646 static const struct of_device_id s3c64xx_spi_dt_match[] = { 1647 { .compatible = "samsung,exynos4210-spi", 1648 
.data = (void *)&exynos4_spi_port_config, 1649 }, 1650 { .compatible = "samsung,exynos5440-spi", 1651 .data = (void *)&exynos5440_spi_port_config, 1652 }, 1653 { }, 1654 }; 1655 MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match); 1656 1657 static struct platform_driver s3c64xx_spi_driver = { 1658 .driver = { 1659 .name = "s3c64xx-spi", 1660 .owner = THIS_MODULE, 1661 .pm = &s3c64xx_spi_pm, 1662 .of_match_table = of_match_ptr(s3c64xx_spi_dt_match), 1663 }, 1664 .remove = s3c64xx_spi_remove, 1665 .id_table = s3c64xx_spi_driver_ids, 1666 }; 1667 MODULE_ALIAS("platform:s3c64xx-spi"); 1668 1669 static int __init s3c64xx_spi_init(void) 1670 { 1671 return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe); 1672 } 1673 subsys_initcall(s3c64xx_spi_init); 1674 1675 static void __exit s3c64xx_spi_exit(void) 1676 { 1677 platform_driver_unregister(&s3c64xx_spi_driver); 1678 } 1679 module_exit(s3c64xx_spi_exit); 1680 1681 MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>"); 1682 MODULE_DESCRIPTION("S3C64XX SPI Controller Driver"); 1683 MODULE_LICENSE("GPL"); 1684