/*
 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright (C) 2008 Juergen Beisert
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation
 * 51 Franklin Street, Fifth Floor
 * Boston, MA  02110-1301, USA.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>

#include <linux/platform_data/dma-imx.h>
#include <linux/platform_data/spi-imx.h>

#define DRIVER_NAME "spi_imx"

#define MXC_CSPIRXDATA		0x00
#define MXC_CSPITXDATA		0x04
#define MXC_CSPICTRL		0x08
#define MXC_CSPIINT		0x0c
#define MXC_RESET		0x1c

/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR	(1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE	(1 << 1) /* Transmit FIFO empty interrupt */

/* The maximum bytes that a sdma BD can transfer. */
#define MAX_SDMA_BD_BYTES (1 << 15)
#define MX51_ECSPI_CTRL_MAX_BURST 512

enum spi_imx_devtype {
	IMX1_CSPI,
	IMX21_CSPI,
	IMX27_CSPI,
	IMX31_CSPI,
	IMX35_CSPI,	/* CSPI on all i.mx except above */
	IMX51_ECSPI,	/* ECSPI on i.mx51 */
	IMX53_ECSPI,	/* ECSPI on i.mx53 and later */
};

struct spi_imx_data;

struct spi_imx_devtype_data {
	void (*intctrl)(struct spi_imx_data *, int);
	int (*config)(struct spi_device *);
	void (*trigger)(struct spi_imx_data *);
	int (*rx_available)(struct spi_imx_data *);
	void (*reset)(struct spi_imx_data *);
	bool has_dmamode;
	unsigned int fifo_size;
	bool dynamic_burst;
	enum spi_imx_devtype devtype;
};

struct spi_imx_data {
	struct spi_bitbang bitbang;
	struct device *dev;

	struct completion xfer_done;
	void __iomem *base;
	unsigned long base_phys;

	struct clk *clk_per;
	struct clk *clk_ipg;
	unsigned long spi_clk;
	unsigned int spi_bus_clk;

	unsigned int speed_hz;
	unsigned int bits_per_word;
	unsigned int spi_drctl;

	unsigned int count, remainder;
	void (*tx)(struct spi_imx_data *);
	void (*rx)(struct spi_imx_data *);
	void *rx_buf;
	const void *tx_buf;
	unsigned int txfifo; /* number of words pushed in tx FIFO */
	unsigned int dynamic_burst, read_u32;
	unsigned int word_mask;

	/* DMA */
	bool usedma;
	u32 wml;
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct spi_imx_devtype_data *devtype_data;
};

static inline int is_imx27_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX27_CSPI;
}

static inline int is_imx35_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX35_CSPI;
}

static inline int is_imx51_ecspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX51_ECSPI;
}

static inline int is_imx53_ecspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX53_ECSPI;
}

#define MXC_SPI_BUF_RX(type)						\
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)		\
{									\
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);	\
									\
	if (spi_imx->rx_buf) {						\
		*(type *)spi_imx->rx_buf = val;				\
		spi_imx->rx_buf += sizeof(type);			\
	}								\
}

#define MXC_SPI_BUF_TX(type)						\
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)		\
{									\
	type val = 0;							\
									\
	if (spi_imx->tx_buf) {						\
		val = *(type *)spi_imx->tx_buf;				\
		spi_imx->tx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->count -= sizeof(type);					\
									\
	writel(val, spi_imx->base + MXC_CSPITXDATA);			\
}

MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)

/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
 * (which is currently not the case in this driver)
 */
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024};

/* MX21, MX27 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
		unsigned int fspi, unsigned int max, unsigned int *fres)
{
	int i;

	for (i = 2; i < max; i++)
		if (fspi * mxc_clkdivs[i] >= fin)
			break;

	*fres = fin / mxc_clkdivs[i];
	return i;
}

/* MX1, MX31, MX35, MX51 CSPI */
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
		unsigned int fspi, unsigned int *fres)
{
	int i, div = 4;

	for (i = 0; i < 7; i++) {
		if (fspi * div >= fin)
			goto out;
		div <<= 1;
	}

out:
	*fres = fin / div;
	return i;
}

static int spi_imx_bytes_per_word(const int bits_per_word)
{
	return DIV_ROUND_UP(bits_per_word, BITS_PER_BYTE);
}

static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
			 struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
	unsigned int bytes_per_word, i;

	if (!master->dma_rx)
		return false;

	bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);

	if (bytes_per_word != 1 && bytes_per_word != 2 && bytes_per_word != 4)
		return false;

	for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
		if (!(transfer->len % (i * bytes_per_word)))
			break;
	}

	if (i == 0)
		return false;

	spi_imx->wml = i;
	spi_imx->dynamic_burst = 0;

	return true;
}

#define MX51_ECSPI_CTRL		0x08
#define MX51_ECSPI_CTRL_ENABLE		(1 << 0)
#define MX51_ECSPI_CTRL_XCH		(1 << 2)
#define MX51_ECSPI_CTRL_SMC		(1 << 3)
#define MX51_ECSPI_CTRL_MODE_MASK	(0xf << 4)
#define MX51_ECSPI_CTRL_DRCTL(drctl)	((drctl) << 16)
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET	8
#define MX51_ECSPI_CTRL_PREDIV_OFFSET	12
#define MX51_ECSPI_CTRL_CS(cs)		((cs) << 18)
#define MX51_ECSPI_CTRL_BL_OFFSET	20
#define MX51_ECSPI_CTRL_BL_MASK		(0xfff << 20)

#define MX51_ECSPI_CONFIG	0x0c
#define MX51_ECSPI_CONFIG_SCLKPHA(cs)	(1 << ((cs) +  0))
#define MX51_ECSPI_CONFIG_SCLKPOL(cs)	(1 << ((cs) +  4))
#define MX51_ECSPI_CONFIG_SBBCTRL(cs)	(1 << ((cs) +  8))
#define MX51_ECSPI_CONFIG_SSBPOL(cs)	(1 << ((cs) + 12))
#define MX51_ECSPI_CONFIG_SCLKCTL(cs)	(1 << ((cs) + 20))

#define MX51_ECSPI_INT		0x10
#define MX51_ECSPI_INT_TEEN		(1 <<  0)
#define MX51_ECSPI_INT_RREN		(1 <<  3)

#define MX51_ECSPI_DMA		0x14
#define MX51_ECSPI_DMA_TX_WML(wml)	((wml) & 0x3f)
#define MX51_ECSPI_DMA_RX_WML(wml)	(((wml) & 0x3f) << 16)
#define MX51_ECSPI_DMA_RXT_WML(wml)	(((wml) & 0x3f) << 24)

#define MX51_ECSPI_DMA_TEDEN		(1 << 7)
#define MX51_ECSPI_DMA_RXDEN		(1 << 23)
#define MX51_ECSPI_DMA_RXTDEN		(1 << 31)

#define MX51_ECSPI_STAT		0x18
#define MX51_ECSPI_STAT_RR		(1 <<  3)

#define MX51_ECSPI_TESTREG	0x20
#define MX51_ECSPI_TESTREG_LBC	BIT(31)

static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
{
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);
#ifdef __LITTLE_ENDIAN
	unsigned int bytes_per_word;
#endif

	if (spi_imx->rx_buf) {
#ifdef __LITTLE_ENDIAN
		bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
		if (bytes_per_word == 1)
			val = cpu_to_be32(val);
		else if (bytes_per_word == 2)
			val = (val << 16) | (val >> 16);
#endif
		val &= spi_imx->word_mask;
		*(u32 *)spi_imx->rx_buf = val;
		spi_imx->rx_buf += sizeof(u32);
	}
}

static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx)
{
	unsigned int bytes_per_word;

	bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
	if (spi_imx->read_u32) {
		spi_imx_buf_rx_swap_u32(spi_imx);
		return;
	}

	if (bytes_per_word == 1)
		spi_imx_buf_rx_u8(spi_imx);
	else if (bytes_per_word == 2)
		spi_imx_buf_rx_u16(spi_imx);
}

static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
#ifdef __LITTLE_ENDIAN
	unsigned int bytes_per_word;
#endif

	if (spi_imx->tx_buf) {
		val = *(u32 *)spi_imx->tx_buf;
		val &= spi_imx->word_mask;
		spi_imx->tx_buf += sizeof(u32);
	}

	spi_imx->count -= sizeof(u32);
#ifdef __LITTLE_ENDIAN
	bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);

	if (bytes_per_word == 1)
		val = cpu_to_be32(val);
	else if (bytes_per_word == 2)
		val = (val << 16) | (val >> 16);
#endif
	writel(val, spi_imx->base + MXC_CSPITXDATA);
}

static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
{
	u32 ctrl, val;
	unsigned int bytes_per_word;

	if (spi_imx->count == spi_imx->remainder) {
		ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
		ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
		if (spi_imx->count > MX51_ECSPI_CTRL_MAX_BURST) {
			spi_imx->remainder = spi_imx->count %
					     MX51_ECSPI_CTRL_MAX_BURST;
			val = MX51_ECSPI_CTRL_MAX_BURST * 8 - 1;
		} else if (spi_imx->count >= sizeof(u32)) {
			spi_imx->remainder = spi_imx->count % sizeof(u32);
			val = (spi_imx->count - spi_imx->remainder) * 8 - 1;
		} else {
			spi_imx->remainder = 0;
			val = spi_imx->bits_per_word - 1;
			spi_imx->read_u32 = 0;
		}

		ctrl |= (val << MX51_ECSPI_CTRL_BL_OFFSET);
		writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
	}

	if (spi_imx->count >= sizeof(u32)) {
		spi_imx_buf_tx_swap_u32(spi_imx);
		return;
	}

	bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);

	if (bytes_per_word == 1)
		spi_imx_buf_tx_u8(spi_imx);
	else if (bytes_per_word == 2)
		spi_imx_buf_tx_u16(spi_imx);
}

/* MX51 eCSPI */
static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
				      unsigned int fspi, unsigned int *fres)
{
	/*
	 * there are two 4-bit dividers, the pre-divider divides by
	 * $pre, the post-divider by 2^$post
	 */
	unsigned int pre, post;
	unsigned int fin = spi_imx->spi_clk;

	if (unlikely(fspi > fin))
		return 0;

	post = fls(fin) - fls(fspi);
	if (fin > fspi << post)
		post++;

	/* now we have: (fin <= fspi << post) with post being minimal */

	post = max(4U, post) - 4;
	if (unlikely(post > 0xf)) {
		dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
				fspi, fin);
		return 0xff;
	}

	pre = DIV_ROUND_UP(fin, fspi << post) - 1;

	dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
			__func__, fin, fspi, post, pre);

	/* Resulting frequency for the SCLK line. */
	*fres = (fin / (pre + 1)) >> post;

	return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
		(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}

static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned val = 0;

	if (enable & MXC_INT_TE)
		val |= MX51_ECSPI_INT_TEEN;

	if (enable & MXC_INT_RR)
		val |= MX51_ECSPI_INT_RREN;

	writel(val, spi_imx->base + MX51_ECSPI_INT);
}

static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
	u32 reg;

	reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
	reg |= MX51_ECSPI_CTRL_XCH;
	writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}

static int mx51_ecspi_config(struct spi_device *spi)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
	u32 clk = spi_imx->speed_hz, delay, reg;
	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);

	/*
	 * The hardware seems to have a race condition when changing modes. The
	 * current assumption is that the selection of the channel arrives
	 * earlier in the hardware than the mode bits when they are written at
	 * the same time.
	 * So set master mode for all channels as we do not support slave mode.
	 */
	ctrl |= MX51_ECSPI_CTRL_MODE_MASK;

	/*
	 * Enable SPI_RDY handling (falling edge/level triggered).
	 */
	if (spi->mode & SPI_READY)
		ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);

	/* set clock speed */
	ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->speed_hz, &clk);
	spi_imx->spi_bus_clk = clk;

	/* set chip select to use */
	ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);

	ctrl |= (spi_imx->bits_per_word - 1) << MX51_ECSPI_CTRL_BL_OFFSET;

	cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);

	if (spi->mode & SPI_CPHA)
		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);

	if (spi->mode & SPI_CPOL) {
		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
	} else {
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
		cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
	}
	if (spi->mode & SPI_CS_HIGH)
		cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);

	if (spi_imx->usedma)
		ctrl |= MX51_ECSPI_CTRL_SMC;

	/* CTRL register always goes first to bring the controller out of reset */
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	reg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		reg |= MX51_ECSPI_TESTREG_LBC;
	else
		reg &= ~MX51_ECSPI_TESTREG_LBC;
	writel(reg, spi_imx->base + MX51_ECSPI_TESTREG);

	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);

	/*
	 * Wait until the changes in the configuration register CONFIGREG
	 * propagate into the hardware. It takes exactly one tick of the
	 * SCLK clock, but we will wait two SCLK clocks just to be sure. The
	 * delay it takes for the hardware to apply the changes is noticeable
	 * if the SCLK clock runs very slowly. In such a case, if the polarity
	 * of SCLK should be inverted, the GPIO ChipSelect might be asserted
	 * before the SCLK polarity changes, which would disrupt the SPI
	 * communication as the device on the other end would consider the
	 * change of SCLK polarity as a clock tick already.
	 */
	delay = (2 * 1000000) / clk;
	if (likely(delay < 10))	/* SCLK is faster than 100 kHz */
		udelay(delay);
	else			/* SCLK is _very_ slow */
		usleep_range(delay, delay + 10);

	/*
	 * Configure the DMA register: setup the watermark
	 * and enable DMA request.
	 */

	writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_TX_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
		MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);

	return 0;
}

static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
}

static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (mx51_ecspi_rx_available(spi_imx))
		readl(spi_imx->base + MXC_CSPIRXDATA);
}

#define MX31_INTREG_TEEN	(1 << 0)
#define MX31_INTREG_RREN	(1 << 3)

#define MX31_CSPICTRL_ENABLE	(1 << 0)
#define MX31_CSPICTRL_MASTER	(1 << 1)
#define MX31_CSPICTRL_XCH	(1 << 2)
#define MX31_CSPICTRL_SMC	(1 << 3)
#define MX31_CSPICTRL_POL	(1 << 4)
#define MX31_CSPICTRL_PHA	(1 << 5)
#define MX31_CSPICTRL_SSCTL	(1 << 6)
#define MX31_CSPICTRL_SSPOL	(1 << 7)
#define MX31_CSPICTRL_BC_SHIFT	8
#define MX35_CSPICTRL_BL_SHIFT	20
#define MX31_CSPICTRL_CS_SHIFT	24
#define MX35_CSPICTRL_CS_SHIFT	12
#define MX31_CSPICTRL_DR_SHIFT	16

#define MX31_CSPI_DMAREG	0x10
#define MX31_DMAREG_RH_DEN	(1<<4)
#define MX31_DMAREG_TH_DEN	(1<<1)

#define MX31_CSPISTATUS		0x14
#define MX31_STATUS_RR		(1 << 3)

#define MX31_CSPI_TESTREG	0x1C
#define MX31_TEST_LBC		(1 << 14)

/* These functions also work for the i.MX35, but be aware that
 * the i.MX35 has a slightly different register layout for bits
 * we do not use here.
 */
static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX31_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX31_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void mx31_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX31_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int mx31_config(struct spi_device *spi)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->speed_hz, &clk) <<
		MX31_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	if (is_imx35_cspi(spi_imx)) {
		reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
		reg |= MX31_CSPICTRL_SSCTL;
	} else {
		reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
	}

	if (spi->mode & SPI_CPHA)
		reg |= MX31_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX31_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX31_CSPICTRL_SSPOL;
	if (!gpio_is_valid(spi->cs_gpio))
		reg |= (spi->chip_select) <<
			(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
						  MX31_CSPICTRL_CS_SHIFT);

	if (spi_imx->usedma)
		reg |= MX31_CSPICTRL_SMC;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		reg |= MX31_TEST_LBC;
	else
		reg &= ~MX31_TEST_LBC;
	writel(reg, spi_imx->base + MX31_CSPI_TESTREG);

	if (spi_imx->usedma) {
		/* configure DMA requests when RXFIFO is half full and
		   when TXFIFO is half empty */
		writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
			spi_imx->base + MX31_CSPI_DMAREG);
	}

	return 0;
}

static int mx31_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
}

static void mx31_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
		readl(spi_imx->base + MXC_CSPIRXDATA);
}

#define MX21_INTREG_RR		(1 << 4)
#define MX21_INTREG_TEEN	(1 << 9)
#define MX21_INTREG_RREN	(1 << 13)

#define MX21_CSPICTRL_POL	(1 << 5)
#define MX21_CSPICTRL_PHA	(1 << 6)
#define MX21_CSPICTRL_SSPOL	(1 << 8)
#define MX21_CSPICTRL_XCH	(1 << 9)
#define MX21_CSPICTRL_ENABLE	(1 << 10)
#define MX21_CSPICTRL_MASTER	(1 << 11)
#define MX21_CSPICTRL_DR_SHIFT	14
#define MX21_CSPICTRL_CS_SHIFT	19

static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX21_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX21_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void mx21_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX21_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int mx21_config(struct spi_device *spi)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
	unsigned int clk;

	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->speed_hz, max, &clk)
		<< MX21_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX21_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX21_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX21_CSPICTRL_SSPOL;
	if (!gpio_is_valid(spi->cs_gpio))
		reg |= spi->chip_select << MX21_CSPICTRL_CS_SHIFT;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int mx21_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
}

static void mx21_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}

#define MX1_INTREG_RR		(1 << 3)
#define MX1_INTREG_TEEN		(1 << 8)
#define MX1_INTREG_RREN		(1 << 11)

#define MX1_CSPICTRL_POL	(1 << 4)
#define MX1_CSPICTRL_PHA	(1 << 5)
#define MX1_CSPICTRL_XCH	(1 << 8)
#define MX1_CSPICTRL_ENABLE	(1 << 9)
#define MX1_CSPICTRL_MASTER	(1 << 10)
#define MX1_CSPICTRL_DR_SHIFT	13

static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX1_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX1_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void mx1_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX1_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int mx1_config(struct spi_device *spi)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->speed_hz, &clk) <<
		MX1_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX1_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX1_CSPICTRL_POL;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int mx1_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
}

static void mx1_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}

static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
	.intctrl = mx1_intctrl,
	.config = mx1_config,
	.trigger = mx1_trigger,
	.rx_available = mx1_rx_available,
	.reset = mx1_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.devtype = IMX1_CSPI,
};

static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
	.intctrl = mx21_intctrl,
	.config = mx21_config,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.devtype = IMX21_CSPI,
};

static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
	/* i.mx27 cspi shares the functions with i.mx21 one */
	.intctrl = mx21_intctrl,
	.config = mx21_config,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.devtype = IMX27_CSPI,
};

static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
	.intctrl = mx31_intctrl,
	.config = mx31_config,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.devtype = IMX31_CSPI,
};

static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
	/* i.mx35 and later cspi shares the functions with i.mx31 one */
	.intctrl = mx31_intctrl,
	.config = mx31_config,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.fifo_size = 8,
	.has_dmamode = true,
	.dynamic_burst = false,
	.devtype = IMX35_CSPI,
};

static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.config = mx51_ecspi_config,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.fifo_size = 64,
	.has_dmamode = true,
	.dynamic_burst = true,
	.devtype = IMX51_ECSPI,
};

static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.config = mx51_ecspi_config,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.fifo_size = 64,
	.has_dmamode = true,
	.devtype = IMX53_ECSPI,
};

static const struct platform_device_id spi_imx_devtype[] = {
	{
		.name = "imx1-cspi",
		.driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data,
	}, {
		.name = "imx21-cspi",
		.driver_data = (kernel_ulong_t) &imx21_cspi_devtype_data,
	}, {
		.name = "imx27-cspi",
		.driver_data = (kernel_ulong_t) &imx27_cspi_devtype_data,
	}, {
		.name = "imx31-cspi",
		.driver_data = (kernel_ulong_t) &imx31_cspi_devtype_data,
	}, {
		.name = "imx35-cspi",
		.driver_data = (kernel_ulong_t) &imx35_cspi_devtype_data,
	}, {
		.name = "imx51-ecspi",
		.driver_data = (kernel_ulong_t) &imx51_ecspi_devtype_data,
	}, {
		.name = "imx53-ecspi",
		.driver_data = (kernel_ulong_t) &imx53_ecspi_devtype_data,
	}, {
		/* sentinel */
	}
};

static const struct of_device_id spi_imx_dt_ids[] = {
	{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
	{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
	{ .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
	{ .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
	{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
	{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
	{ .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);

static void spi_imx_chipselect(struct spi_device *spi, int is_active)
{
	int active = is_active != BITBANG_CS_INACTIVE;
	int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);

	if (spi->mode & SPI_NO_CS)
		return;

	if (!gpio_is_valid(spi->cs_gpio))
		return;

	gpio_set_value(spi->cs_gpio, dev_is_lowactive ^ active);
}

static void spi_imx_push(struct spi_imx_data *spi_imx)
{
	while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) {
		if (!spi_imx->count)
			break;
		if (spi_imx->txfifo && (spi_imx->count == spi_imx->remainder))
			break;
		spi_imx->tx(spi_imx);
		spi_imx->txfifo++;
	}
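
	/*
	 * The TX FIFO is now as full as this transfer allows; kick off the
	 * burst. For the controller variants handled above, the devtype
	 * trigger hook sets the XCH bit in the control register.
	 */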
	spi_imx->devtype_data->trigger(spi_imx);
}

static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
	struct spi_imx_data *spi_imx = dev_id;

	while (spi_imx->devtype_data->rx_available(spi_imx)) {
		spi_imx->rx(spi_imx);
		spi_imx->txfifo--;
	}

	if (spi_imx->count) {
		spi_imx_push(spi_imx);
		return IRQ_HANDLED;
	}

	if (spi_imx->txfifo) {
		/* No data left to push, but still waiting for rx data,
		 * enable receive data available interrupt.
		 */
		spi_imx->devtype_data->intctrl(
				spi_imx, MXC_INT_RR);
		return IRQ_HANDLED;
	}

	spi_imx->devtype_data->intctrl(spi_imx, 0);
	complete(&spi_imx->xfer_done);

	return IRQ_HANDLED;
}

static int spi_imx_dma_configure(struct spi_master *master)
{
	int ret;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config rx = {}, tx = {};
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
	case 4:
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	case 2:
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case 1:
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	default:
		return -EINVAL;
	}

	tx.direction = DMA_MEM_TO_DEV;
	tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
	tx.dst_addr_width = buswidth;
	tx.dst_maxburst = spi_imx->wml;
	ret = dmaengine_slave_config(master->dma_tx, &tx);
	if (ret) {
		dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
		return ret;
	}

	rx.direction = DMA_DEV_TO_MEM;
	rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
	rx.src_addr_width = buswidth;
	rx.src_maxburst = spi_imx->wml;
	ret = dmaengine_slave_config(master->dma_rx, &rx);
	if (ret) {
		dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
		return ret;
	}

	return 0;
}

static int spi_imx_setupxfer(struct spi_device *spi,
				 struct spi_transfer *t)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	int ret;

	if (!t)
		return 0;

	spi_imx->bits_per_word = t->bits_per_word;
	spi_imx->speed_hz = t->speed_hz;

	/* Initialize the functions for transfer */
	if (spi_imx->devtype_data->dynamic_burst) {
		u32 mask;

		spi_imx->dynamic_burst = 0;
		spi_imx->remainder = 0;
		spi_imx->read_u32 = 1;

		mask = (1 << spi_imx->bits_per_word) - 1;
		spi_imx->rx = spi_imx_buf_rx_swap;
		spi_imx->tx = spi_imx_buf_tx_swap;
		spi_imx->dynamic_burst = 1;
		spi_imx->remainder = t->len;

		if (spi_imx->bits_per_word <= 8)
			spi_imx->word_mask = mask << 24 | mask << 16
					     | mask << 8 | mask;
		else if (spi_imx->bits_per_word <= 16)
			spi_imx->word_mask = mask << 16 | mask;
		else
			spi_imx->word_mask = mask;
	} else {
		if (spi_imx->bits_per_word <= 8) {
			spi_imx->rx = spi_imx_buf_rx_u8;
			spi_imx->tx = spi_imx_buf_tx_u8;
		} else if (spi_imx->bits_per_word <= 16) {
			spi_imx->rx = spi_imx_buf_rx_u16;
			spi_imx->tx = spi_imx_buf_tx_u16;
		} else {
			spi_imx->rx = spi_imx_buf_rx_u32;
			spi_imx->tx = spi_imx_buf_tx_u32;
		}
	}

	if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
		spi_imx->usedma = 1;
	else
		spi_imx->usedma = 0;

	if (spi_imx->usedma) {
		ret = spi_imx_dma_configure(spi->master);
		if (ret)
			return ret;
	}
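
	/*
	 * Push the per-transfer settings (clock divider, word/burst length,
	 * SPI mode bits and, on eCSPI, the DMA watermarks) into the
	 * controller registers through the devtype-specific config hook.
	 */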
	spi_imx->devtype_data->config(spi);

	return 0;
}

static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
{
	struct spi_master *master = spi_imx->bitbang.master;

	if (master->dma_rx) {
		dma_release_channel(master->dma_rx);
		master->dma_rx = NULL;
	}

	if (master->dma_tx) {
		dma_release_channel(master->dma_tx);
		master->dma_tx = NULL;
	}
}

static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
			     struct spi_master *master)
{
	int ret;

	/* use pio mode for i.mx6dl chip TKT238285 */
	if (of_machine_is_compatible("fsl,imx6dl"))
		return 0;

	spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;

	/* Prepare for TX DMA: */
	master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(master->dma_tx)) {
		ret = PTR_ERR(master->dma_tx);
		dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
		master->dma_tx = NULL;
		goto err;
	}

	/* Prepare for RX DMA: */
	master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(master->dma_rx)) {
		ret = PTR_ERR(master->dma_rx);
		dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
		master->dma_rx = NULL;
		goto err;
	}

	init_completion(&spi_imx->dma_rx_completion);
	init_completion(&spi_imx->dma_tx_completion);
	master->can_dma = spi_imx_can_dma;
	master->max_dma_len = MAX_SDMA_BD_BYTES;
	spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
					 SPI_MASTER_MUST_TX;

	return 0;
err:
	spi_imx_sdma_exit(spi_imx);
	return ret;
}

static void spi_imx_dma_rx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

	complete(&spi_imx->dma_rx_completion);
}

static void spi_imx_dma_tx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

	complete(&spi_imx->dma_tx_completion);
}

static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
{
	unsigned long timeout = 0;

	/* Time with actual data transfer and CS change delay related to HW */
	timeout = (8 + 4) * size / spi_imx->spi_bus_clk;

	/* Add extra second for scheduler related activities */
	timeout += 1;

	/* Double calculated timeout */
	return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
}

static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
				struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
	unsigned long transfer_timeout;
	unsigned long timeout;
	struct spi_master *master = spi_imx->bitbang.master;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;

	/*
	 * The TX DMA setup starts the transfer, so make sure RX is configured
	 * before TX.
	 */
	desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx)
		return -EINVAL;

	desc_rx->callback = spi_imx_dma_rx_callback;
	desc_rx->callback_param = (void *)spi_imx;
	dmaengine_submit(desc_rx);
	reinit_completion(&spi_imx->dma_rx_completion);
	dma_async_issue_pending(master->dma_rx);

	desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		dmaengine_terminate_all(master->dma_tx);
		return -EINVAL;
	}

	desc_tx->callback = spi_imx_dma_tx_callback;
	desc_tx->callback_param = (void *)spi_imx;
	dmaengine_submit(desc_tx);
	reinit_completion(&spi_imx->dma_tx_completion);
	dma_async_issue_pending(master->dma_tx);

	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

	/* Wait for SDMA to finish the data transfer. */
	timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
					      transfer_timeout);
	if (!timeout) {
		dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
		dmaengine_terminate_all(master->dma_tx);
		dmaengine_terminate_all(master->dma_rx);
		return -ETIMEDOUT;
	}

	timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
					      transfer_timeout);
	if (!timeout) {
		dev_err(&master->dev, "I/O Error in DMA RX\n");
		spi_imx->devtype_data->reset(spi_imx);
		dmaengine_terminate_all(master->dma_rx);
		return -ETIMEDOUT;
	}

	return transfer->len;
}

static int spi_imx_pio_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	unsigned long transfer_timeout;
	unsigned long timeout;

	spi_imx->tx_buf = transfer->tx_buf;
	spi_imx->rx_buf = transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;

	reinit_completion(&spi_imx->xfer_done);

	spi_imx_push(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);

	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

	timeout = wait_for_completion_timeout(&spi_imx->xfer_done,
					      transfer_timeout);
	if (!timeout) {
		dev_err(&spi->dev, "I/O Error in PIO\n");
		spi_imx->devtype_data->reset(spi_imx);
		return -ETIMEDOUT;
	}

	return transfer->len;
}

static int spi_imx_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

	if (spi_imx->usedma)
		return spi_imx_dma_transfer(spi_imx, transfer);
	else
		return spi_imx_pio_transfer(spi, transfer);
}

static int spi_imx_setup(struct spi_device *spi)
{
	dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
		 spi->mode, spi->bits_per_word, spi->max_speed_hz);

	if (spi->mode & SPI_NO_CS)
		return 0;

	if (gpio_is_valid(spi->cs_gpio))
		gpio_direction_output(spi->cs_gpio,
				      spi->mode & SPI_CS_HIGH ? 0 : 1);
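
	/* Make sure the chip select starts out in its deasserted state. */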
	spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);

	return 0;
}

static void spi_imx_cleanup(struct spi_device *spi)
{
}

static int
spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
	int ret;

	ret = clk_enable(spi_imx->clk_per);
	if (ret)
		return ret;

	ret = clk_enable(spi_imx->clk_ipg);
	if (ret) {
		clk_disable(spi_imx->clk_per);
		return ret;
	}

	return 0;
}

static int
spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	clk_disable(spi_imx->clk_ipg);
	clk_disable(spi_imx->clk_per);
	return 0;
}

static int spi_imx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(spi_imx_dt_ids, &pdev->dev);
	struct spi_imx_master *mxc_platform_info =
			dev_get_platdata(&pdev->dev);
	struct spi_master *master;
	struct spi_imx_data *spi_imx;
	struct resource *res;
	int i, ret, irq, spi_drctl;

	if (!np && !mxc_platform_info) {
		dev_err(&pdev->dev, "can't get the platform data\n");
		return -EINVAL;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));
	if (!master)
		return -ENOMEM;

	ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
	if ((ret < 0) || (spi_drctl >= 0x3)) {
		/* '11' is reserved */
		spi_drctl = 0;
	}

	platform_set_drvdata(pdev, master);

	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	master->bus_num = np ? -1 : pdev->id;

	spi_imx = spi_master_get_devdata(master);
	spi_imx->bitbang.master = master;
	spi_imx->dev = &pdev->dev;

	spi_imx->devtype_data = of_id ? of_id->data :
		(struct spi_imx_devtype_data *)pdev->id_entry->driver_data;
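
	/*
	 * Legacy (non-DT) boards hand over their chip-select GPIOs through
	 * platform data; copy them into the master's cs_gpios table.
	 */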
	if (mxc_platform_info) {
		master->num_chipselect = mxc_platform_info->num_chipselect;
		master->cs_gpios = devm_kzalloc(&master->dev,
			sizeof(int) * master->num_chipselect, GFP_KERNEL);
		if (!master->cs_gpios)
			return -ENOMEM;

		for (i = 0; i < master->num_chipselect; i++)
			master->cs_gpios[i] = mxc_platform_info->chipselect[i];
	}

	spi_imx->bitbang.chipselect = spi_imx_chipselect;
	spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
	spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
	spi_imx->bitbang.master->setup = spi_imx_setup;
	spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
	spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
	spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
	spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
					     SPI_NO_CS;
	if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
	    is_imx53_ecspi(spi_imx))
		spi_imx->bitbang.master->mode_bits |= SPI_LOOP | SPI_READY;

	spi_imx->spi_drctl = spi_drctl;

	init_completion(&spi_imx->xfer_done);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(spi_imx->base)) {
		ret = PTR_ERR(spi_imx->base);
		goto out_master_put;
	}
	spi_imx->base_phys = res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto out_master_put;
	}

	ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
			       dev_name(&pdev->dev), spi_imx);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
		goto out_master_put;
	}

	spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(spi_imx->clk_ipg)) {
		ret = PTR_ERR(spi_imx->clk_ipg);
		goto out_master_put;
	}

	spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(spi_imx->clk_per)) {
		ret = PTR_ERR(spi_imx->clk_per);
		goto out_master_put;
	}

	ret = clk_prepare_enable(spi_imx->clk_per);
	if (ret)
		goto out_master_put;

	ret = clk_prepare_enable(spi_imx->clk_ipg);
	if (ret)
		goto out_put_per;

	spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
	/*
	 * Only validated on i.mx35 and i.mx6 now, can remove the constraint
	 * if validated on other chips.
	 */
	if (spi_imx->devtype_data->has_dmamode) {
		ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master);
		if (ret == -EPROBE_DEFER)
			goto out_clk_put;

		if (ret < 0)
			dev_err(&pdev->dev, "dma setup error %d, use pio\n",
				ret);
	}

	spi_imx->devtype_data->reset(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, 0);

	master->dev.of_node = pdev->dev.of_node;
	ret = spi_bitbang_start(&spi_imx->bitbang);
	if (ret) {
		dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
		goto out_clk_put;
	}

	if (!master->cs_gpios) {
		dev_err(&pdev->dev, "No CS GPIOs available\n");
		ret = -EINVAL;
		goto out_clk_put;
	}

	for (i = 0; i < master->num_chipselect; i++) {
		if (!gpio_is_valid(master->cs_gpios[i]))
			continue;

		ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
					DRIVER_NAME);
		if (ret) {
			dev_err(&pdev->dev, "Can't get CS GPIO %i\n",
				master->cs_gpios[i]);
			goto out_clk_put;
		}
	}

	dev_info(&pdev->dev, "probed\n");

	clk_disable(spi_imx->clk_ipg);
	clk_disable(spi_imx->clk_per);
	return ret;

out_clk_put:
	clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
	clk_disable_unprepare(spi_imx->clk_per);
out_master_put:
	spi_master_put(master);

	return ret;
}

static int spi_imx_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	spi_bitbang_stop(&spi_imx->bitbang);

	writel(0, spi_imx->base + MXC_CSPICTRL);
	clk_unprepare(spi_imx->clk_ipg);
	clk_unprepare(spi_imx->clk_per);
	spi_imx_sdma_exit(spi_imx);
	spi_master_put(master);

	return 0;
}

static struct platform_driver spi_imx_driver = {
	.driver = {
		   .name = DRIVER_NAME,
		   .of_match_table = spi_imx_dt_ids,
		   },
	.id_table = spi_imx_devtype,
	.probe = spi_imx_probe,
	.remove = spi_imx_remove,
};
module_platform_driver(spi_imx_driver);

MODULE_DESCRIPTION("SPI Master Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);