1 /* 2 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. 3 * Copyright (C) 2008 Juergen Beisert 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License 7 * as published by the Free Software Foundation; either version 2 8 * of the License, or (at your option) any later version. 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write to the 16 * Free Software Foundation 17 * 51 Franklin Street, Fifth Floor 18 * Boston, MA 02110-1301, USA. 19 */ 20 21 #include <linux/clk.h> 22 #include <linux/completion.h> 23 #include <linux/delay.h> 24 #include <linux/dmaengine.h> 25 #include <linux/dma-mapping.h> 26 #include <linux/err.h> 27 #include <linux/gpio.h> 28 #include <linux/interrupt.h> 29 #include <linux/io.h> 30 #include <linux/irq.h> 31 #include <linux/kernel.h> 32 #include <linux/module.h> 33 #include <linux/platform_device.h> 34 #include <linux/slab.h> 35 #include <linux/spi/spi.h> 36 #include <linux/spi/spi_bitbang.h> 37 #include <linux/types.h> 38 #include <linux/of.h> 39 #include <linux/of_device.h> 40 #include <linux/of_gpio.h> 41 42 #include <linux/platform_data/dma-imx.h> 43 #include <linux/platform_data/spi-imx.h> 44 45 #define DRIVER_NAME "spi_imx" 46 47 #define MXC_CSPIRXDATA 0x00 48 #define MXC_CSPITXDATA 0x04 49 #define MXC_CSPICTRL 0x08 50 #define MXC_CSPIINT 0x0c 51 #define MXC_RESET 0x1c 52 53 /* generic defines to abstract from the different register layouts */ 54 #define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */ 55 #define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */ 56 57 /* The maximum bytes that a sdma BD can 
transfer.*/ 58 #define MAX_SDMA_BD_BYTES (1 << 15) 59 60 enum spi_imx_devtype { 61 IMX1_CSPI, 62 IMX21_CSPI, 63 IMX27_CSPI, 64 IMX31_CSPI, 65 IMX35_CSPI, /* CSPI on all i.mx except above */ 66 IMX51_ECSPI, /* ECSPI on i.mx51 and later */ 67 }; 68 69 struct spi_imx_data; 70 71 struct spi_imx_devtype_data { 72 void (*intctrl)(struct spi_imx_data *, int); 73 int (*config)(struct spi_device *); 74 void (*trigger)(struct spi_imx_data *); 75 int (*rx_available)(struct spi_imx_data *); 76 void (*reset)(struct spi_imx_data *); 77 enum spi_imx_devtype devtype; 78 }; 79 80 struct spi_imx_data { 81 struct spi_bitbang bitbang; 82 struct device *dev; 83 84 struct completion xfer_done; 85 void __iomem *base; 86 unsigned long base_phys; 87 88 struct clk *clk_per; 89 struct clk *clk_ipg; 90 unsigned long spi_clk; 91 unsigned int spi_bus_clk; 92 93 unsigned int speed_hz; 94 unsigned int bits_per_word; 95 unsigned int spi_drctl; 96 97 unsigned int count; 98 void (*tx)(struct spi_imx_data *); 99 void (*rx)(struct spi_imx_data *); 100 void *rx_buf; 101 const void *tx_buf; 102 unsigned int txfifo; /* number of words pushed in tx FIFO */ 103 104 /* DMA */ 105 bool usedma; 106 u32 wml; 107 struct completion dma_rx_completion; 108 struct completion dma_tx_completion; 109 110 const struct spi_imx_devtype_data *devtype_data; 111 }; 112 113 static inline int is_imx27_cspi(struct spi_imx_data *d) 114 { 115 return d->devtype_data->devtype == IMX27_CSPI; 116 } 117 118 static inline int is_imx35_cspi(struct spi_imx_data *d) 119 { 120 return d->devtype_data->devtype == IMX35_CSPI; 121 } 122 123 static inline int is_imx51_ecspi(struct spi_imx_data *d) 124 { 125 return d->devtype_data->devtype == IMX51_ECSPI; 126 } 127 128 static inline unsigned spi_imx_get_fifosize(struct spi_imx_data *d) 129 { 130 return is_imx51_ecspi(d) ? 
64 : 8;
}

/*
 * Generate a FIFO read helper for the given word size.  The helper
 * always drains one word from RXDATA; the value is stored only if the
 * current transfer has an RX buffer attached.
 */
#define MXC_SPI_BUF_RX(type)						\
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)		\
{									\
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);	\
									\
	if (spi_imx->rx_buf) {						\
		*(type *)spi_imx->rx_buf = val;				\
		spi_imx->rx_buf += sizeof(type);			\
	}								\
}

/*
 * Generate a FIFO write helper for the given word size.  Transfers
 * without a TX buffer push zeroes; "count" is decremented by the word
 * size either way.
 */
#define MXC_SPI_BUF_TX(type)						\
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)		\
{									\
	type val = 0;							\
									\
	if (spi_imx->tx_buf) {						\
		val = *(type *)spi_imx->tx_buf;				\
		spi_imx->tx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->count -= sizeof(type);					\
									\
	writel(val, spi_imx->base + MXC_CSPITXDATA);			\
}

MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)

/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
 * (which is currently not the case in this driver)
 */
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024};

/* MX21, MX27 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
		unsigned int fspi, unsigned int max, unsigned int *fres)
{
	int i;

	/*
	 * Pick the smallest divider from the table that brings fin down to
	 * fspi or below.  If none is large enough the loop falls through
	 * with i == max, selecting the largest divider the caller allows.
	 */
	for (i = 2; i < max; i++)
		if (fspi * mxc_clkdivs[i] >= fin)
			break;

	/* Report the SCLK rate that will actually result. */
	*fres = fin / mxc_clkdivs[i];
	return i;
}

/* MX1, MX31, MX35, MX51 CSPI */
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
		unsigned int fspi, unsigned int *fres)
{
	int i, div = 4;

	/* Dividers are powers of two from 4 up to 512 (3-bit field). */
	for (i = 0; i < 7; i++) {
		if (fspi * div >= fin)
			goto out;
		div <<= 1;
	}

out:
	/* Report the SCLK rate that will actually result. */
	*fres = fin / div;
	return i;
}

/* Number of bytes needed to hold one word of the given bit width. */
static int spi_imx_bytes_per_word(const int bits_per_word)
{
	return DIV_ROUND_UP(bits_per_word, BITS_PER_BYTE);
}

/*
 * Decide whether a transfer is eligible for DMA: the word size must be
 * 1, 2 or 4 bytes and the length must divide evenly into bursts of at
 * most half the FIFO.  The chosen burst size is stored as the DMA
 * watermark level (wml).
 */
static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
			 struct spi_transfer *transfer)
{
	struct
spi_imx_data *spi_imx = spi_master_get_devdata(master); 212 unsigned int bytes_per_word, i; 213 214 if (!master->dma_rx) 215 return false; 216 217 bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word); 218 219 if (bytes_per_word != 1 && bytes_per_word != 2 && bytes_per_word != 4) 220 return false; 221 222 for (i = spi_imx_get_fifosize(spi_imx) / 2; i > 0; i--) { 223 if (!(transfer->len % (i * bytes_per_word))) 224 break; 225 } 226 227 if (i == 0) 228 return false; 229 230 spi_imx->wml = i; 231 232 return true; 233 } 234 235 #define MX51_ECSPI_CTRL 0x08 236 #define MX51_ECSPI_CTRL_ENABLE (1 << 0) 237 #define MX51_ECSPI_CTRL_XCH (1 << 2) 238 #define MX51_ECSPI_CTRL_SMC (1 << 3) 239 #define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4) 240 #define MX51_ECSPI_CTRL_DRCTL(drctl) ((drctl) << 16) 241 #define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8 242 #define MX51_ECSPI_CTRL_PREDIV_OFFSET 12 243 #define MX51_ECSPI_CTRL_CS(cs) ((cs) << 18) 244 #define MX51_ECSPI_CTRL_BL_OFFSET 20 245 246 #define MX51_ECSPI_CONFIG 0x0c 247 #define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0)) 248 #define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4)) 249 #define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8)) 250 #define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs) + 12)) 251 #define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs) + 20)) 252 253 #define MX51_ECSPI_INT 0x10 254 #define MX51_ECSPI_INT_TEEN (1 << 0) 255 #define MX51_ECSPI_INT_RREN (1 << 3) 256 257 #define MX51_ECSPI_DMA 0x14 258 #define MX51_ECSPI_DMA_TX_WML(wml) ((wml) & 0x3f) 259 #define MX51_ECSPI_DMA_RX_WML(wml) (((wml) & 0x3f) << 16) 260 #define MX51_ECSPI_DMA_RXT_WML(wml) (((wml) & 0x3f) << 24) 261 262 #define MX51_ECSPI_DMA_TEDEN (1 << 7) 263 #define MX51_ECSPI_DMA_RXDEN (1 << 23) 264 #define MX51_ECSPI_DMA_RXTDEN (1 << 31) 265 266 #define MX51_ECSPI_STAT 0x18 267 #define MX51_ECSPI_STAT_RR (1 << 3) 268 269 #define MX51_ECSPI_TESTREG 0x20 270 #define MX51_ECSPI_TESTREG_LBC BIT(31) 271 272 /* MX51 eCSPI */ 273 static unsigned int 
mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx, 274 unsigned int fspi, unsigned int *fres) 275 { 276 /* 277 * there are two 4-bit dividers, the pre-divider divides by 278 * $pre, the post-divider by 2^$post 279 */ 280 unsigned int pre, post; 281 unsigned int fin = spi_imx->spi_clk; 282 283 if (unlikely(fspi > fin)) 284 return 0; 285 286 post = fls(fin) - fls(fspi); 287 if (fin > fspi << post) 288 post++; 289 290 /* now we have: (fin <= fspi << post) with post being minimal */ 291 292 post = max(4U, post) - 4; 293 if (unlikely(post > 0xf)) { 294 dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n", 295 fspi, fin); 296 return 0xff; 297 } 298 299 pre = DIV_ROUND_UP(fin, fspi << post) - 1; 300 301 dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n", 302 __func__, fin, fspi, post, pre); 303 304 /* Resulting frequency for the SCLK line. */ 305 *fres = (fin / (pre + 1)) >> post; 306 307 return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) | 308 (post << MX51_ECSPI_CTRL_POSTDIV_OFFSET); 309 } 310 311 static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable) 312 { 313 unsigned val = 0; 314 315 if (enable & MXC_INT_TE) 316 val |= MX51_ECSPI_INT_TEEN; 317 318 if (enable & MXC_INT_RR) 319 val |= MX51_ECSPI_INT_RREN; 320 321 writel(val, spi_imx->base + MX51_ECSPI_INT); 322 } 323 324 static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx) 325 { 326 u32 reg; 327 328 reg = readl(spi_imx->base + MX51_ECSPI_CTRL); 329 reg |= MX51_ECSPI_CTRL_XCH; 330 writel(reg, spi_imx->base + MX51_ECSPI_CTRL); 331 } 332 333 static int mx51_ecspi_config(struct spi_device *spi) 334 { 335 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); 336 u32 ctrl = MX51_ECSPI_CTRL_ENABLE; 337 u32 clk = spi_imx->speed_hz, delay, reg; 338 u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG); 339 340 /* 341 * The hardware seems to have a race condition when changing modes. 
The 342 * current assumption is that the selection of the channel arrives 343 * earlier in the hardware than the mode bits when they are written at 344 * the same time. 345 * So set master mode for all channels as we do not support slave mode. 346 */ 347 ctrl |= MX51_ECSPI_CTRL_MODE_MASK; 348 349 /* 350 * Enable SPI_RDY handling (falling edge/level triggered). 351 */ 352 if (spi->mode & SPI_READY) 353 ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl); 354 355 /* set clock speed */ 356 ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->speed_hz, &clk); 357 spi_imx->spi_bus_clk = clk; 358 359 /* set chip select to use */ 360 ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select); 361 362 ctrl |= (spi_imx->bits_per_word - 1) << MX51_ECSPI_CTRL_BL_OFFSET; 363 364 cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select); 365 366 if (spi->mode & SPI_CPHA) 367 cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select); 368 else 369 cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select); 370 371 if (spi->mode & SPI_CPOL) { 372 cfg |= MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select); 373 cfg |= MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select); 374 } else { 375 cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select); 376 cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select); 377 } 378 if (spi->mode & SPI_CS_HIGH) 379 cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select); 380 else 381 cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select); 382 383 if (spi_imx->usedma) 384 ctrl |= MX51_ECSPI_CTRL_SMC; 385 386 /* CTRL register always go first to bring out controller from reset */ 387 writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL); 388 389 reg = readl(spi_imx->base + MX51_ECSPI_TESTREG); 390 if (spi->mode & SPI_LOOP) 391 reg |= MX51_ECSPI_TESTREG_LBC; 392 else 393 reg &= ~MX51_ECSPI_TESTREG_LBC; 394 writel(reg, spi_imx->base + MX51_ECSPI_TESTREG); 395 396 writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG); 397 398 /* 399 * Wait until the changes in the configuration register CONFIGREG 400 * propagate into the hardware. 
It takes exactly one tick of the 401 * SCLK clock, but we will wait two SCLK clock just to be sure. The 402 * effect of the delay it takes for the hardware to apply changes 403 * is noticable if the SCLK clock run very slow. In such a case, if 404 * the polarity of SCLK should be inverted, the GPIO ChipSelect might 405 * be asserted before the SCLK polarity changes, which would disrupt 406 * the SPI communication as the device on the other end would consider 407 * the change of SCLK polarity as a clock tick already. 408 */ 409 delay = (2 * 1000000) / clk; 410 if (likely(delay < 10)) /* SCLK is faster than 100 kHz */ 411 udelay(delay); 412 else /* SCLK is _very_ slow */ 413 usleep_range(delay, delay + 10); 414 415 /* 416 * Configure the DMA register: setup the watermark 417 * and enable DMA request. 418 */ 419 420 writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml) | 421 MX51_ECSPI_DMA_TX_WML(spi_imx->wml) | 422 MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) | 423 MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN | 424 MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA); 425 426 return 0; 427 } 428 429 static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx) 430 { 431 return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR; 432 } 433 434 static void mx51_ecspi_reset(struct spi_imx_data *spi_imx) 435 { 436 /* drain receive buffer */ 437 while (mx51_ecspi_rx_available(spi_imx)) 438 readl(spi_imx->base + MXC_CSPIRXDATA); 439 } 440 441 #define MX31_INTREG_TEEN (1 << 0) 442 #define MX31_INTREG_RREN (1 << 3) 443 444 #define MX31_CSPICTRL_ENABLE (1 << 0) 445 #define MX31_CSPICTRL_MASTER (1 << 1) 446 #define MX31_CSPICTRL_XCH (1 << 2) 447 #define MX31_CSPICTRL_SMC (1 << 3) 448 #define MX31_CSPICTRL_POL (1 << 4) 449 #define MX31_CSPICTRL_PHA (1 << 5) 450 #define MX31_CSPICTRL_SSCTL (1 << 6) 451 #define MX31_CSPICTRL_SSPOL (1 << 7) 452 #define MX31_CSPICTRL_BC_SHIFT 8 453 #define MX35_CSPICTRL_BL_SHIFT 20 454 #define MX31_CSPICTRL_CS_SHIFT 24 455 #define MX35_CSPICTRL_CS_SHIFT 
12 456 #define MX31_CSPICTRL_DR_SHIFT 16 457 458 #define MX31_CSPI_DMAREG 0x10 459 #define MX31_DMAREG_RH_DEN (1<<4) 460 #define MX31_DMAREG_TH_DEN (1<<1) 461 462 #define MX31_CSPISTATUS 0x14 463 #define MX31_STATUS_RR (1 << 3) 464 465 #define MX31_CSPI_TESTREG 0x1C 466 #define MX31_TEST_LBC (1 << 14) 467 468 /* These functions also work for the i.MX35, but be aware that 469 * the i.MX35 has a slightly different register layout for bits 470 * we do not use here. 471 */ 472 static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable) 473 { 474 unsigned int val = 0; 475 476 if (enable & MXC_INT_TE) 477 val |= MX31_INTREG_TEEN; 478 if (enable & MXC_INT_RR) 479 val |= MX31_INTREG_RREN; 480 481 writel(val, spi_imx->base + MXC_CSPIINT); 482 } 483 484 static void mx31_trigger(struct spi_imx_data *spi_imx) 485 { 486 unsigned int reg; 487 488 reg = readl(spi_imx->base + MXC_CSPICTRL); 489 reg |= MX31_CSPICTRL_XCH; 490 writel(reg, spi_imx->base + MXC_CSPICTRL); 491 } 492 493 static int mx31_config(struct spi_device *spi) 494 { 495 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); 496 unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; 497 unsigned int clk; 498 499 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->speed_hz, &clk) << 500 MX31_CSPICTRL_DR_SHIFT; 501 spi_imx->spi_bus_clk = clk; 502 503 if (is_imx35_cspi(spi_imx)) { 504 reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT; 505 reg |= MX31_CSPICTRL_SSCTL; 506 } else { 507 reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT; 508 } 509 510 if (spi->mode & SPI_CPHA) 511 reg |= MX31_CSPICTRL_PHA; 512 if (spi->mode & SPI_CPOL) 513 reg |= MX31_CSPICTRL_POL; 514 if (spi->mode & SPI_CS_HIGH) 515 reg |= MX31_CSPICTRL_SSPOL; 516 if (spi->cs_gpio < 0) 517 reg |= (spi->cs_gpio + 32) << 518 (is_imx35_cspi(spi_imx) ? 
MX35_CSPICTRL_CS_SHIFT : 519 MX31_CSPICTRL_CS_SHIFT); 520 521 if (spi_imx->usedma) 522 reg |= MX31_CSPICTRL_SMC; 523 524 writel(reg, spi_imx->base + MXC_CSPICTRL); 525 526 reg = readl(spi_imx->base + MX31_CSPI_TESTREG); 527 if (spi->mode & SPI_LOOP) 528 reg |= MX31_TEST_LBC; 529 else 530 reg &= ~MX31_TEST_LBC; 531 writel(reg, spi_imx->base + MX31_CSPI_TESTREG); 532 533 if (spi_imx->usedma) { 534 /* configure DMA requests when RXFIFO is half full and 535 when TXFIFO is half empty */ 536 writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN, 537 spi_imx->base + MX31_CSPI_DMAREG); 538 } 539 540 return 0; 541 } 542 543 static int mx31_rx_available(struct spi_imx_data *spi_imx) 544 { 545 return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR; 546 } 547 548 static void mx31_reset(struct spi_imx_data *spi_imx) 549 { 550 /* drain receive buffer */ 551 while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR) 552 readl(spi_imx->base + MXC_CSPIRXDATA); 553 } 554 555 #define MX21_INTREG_RR (1 << 4) 556 #define MX21_INTREG_TEEN (1 << 9) 557 #define MX21_INTREG_RREN (1 << 13) 558 559 #define MX21_CSPICTRL_POL (1 << 5) 560 #define MX21_CSPICTRL_PHA (1 << 6) 561 #define MX21_CSPICTRL_SSPOL (1 << 8) 562 #define MX21_CSPICTRL_XCH (1 << 9) 563 #define MX21_CSPICTRL_ENABLE (1 << 10) 564 #define MX21_CSPICTRL_MASTER (1 << 11) 565 #define MX21_CSPICTRL_DR_SHIFT 14 566 #define MX21_CSPICTRL_CS_SHIFT 19 567 568 static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable) 569 { 570 unsigned int val = 0; 571 572 if (enable & MXC_INT_TE) 573 val |= MX21_INTREG_TEEN; 574 if (enable & MXC_INT_RR) 575 val |= MX21_INTREG_RREN; 576 577 writel(val, spi_imx->base + MXC_CSPIINT); 578 } 579 580 static void mx21_trigger(struct spi_imx_data *spi_imx) 581 { 582 unsigned int reg; 583 584 reg = readl(spi_imx->base + MXC_CSPICTRL); 585 reg |= MX21_CSPICTRL_XCH; 586 writel(reg, spi_imx->base + MXC_CSPICTRL); 587 } 588 589 static int mx21_config(struct spi_device *spi) 590 { 591 struct 
spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); 592 unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER; 593 unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18; 594 unsigned int clk; 595 596 reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->speed_hz, max, &clk) 597 << MX21_CSPICTRL_DR_SHIFT; 598 spi_imx->spi_bus_clk = clk; 599 600 reg |= spi_imx->bits_per_word - 1; 601 602 if (spi->mode & SPI_CPHA) 603 reg |= MX21_CSPICTRL_PHA; 604 if (spi->mode & SPI_CPOL) 605 reg |= MX21_CSPICTRL_POL; 606 if (spi->mode & SPI_CS_HIGH) 607 reg |= MX21_CSPICTRL_SSPOL; 608 if (spi->cs_gpio < 0) 609 reg |= (spi->cs_gpio + 32) << MX21_CSPICTRL_CS_SHIFT; 610 611 writel(reg, spi_imx->base + MXC_CSPICTRL); 612 613 return 0; 614 } 615 616 static int mx21_rx_available(struct spi_imx_data *spi_imx) 617 { 618 return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR; 619 } 620 621 static void mx21_reset(struct spi_imx_data *spi_imx) 622 { 623 writel(1, spi_imx->base + MXC_RESET); 624 } 625 626 #define MX1_INTREG_RR (1 << 3) 627 #define MX1_INTREG_TEEN (1 << 8) 628 #define MX1_INTREG_RREN (1 << 11) 629 630 #define MX1_CSPICTRL_POL (1 << 4) 631 #define MX1_CSPICTRL_PHA (1 << 5) 632 #define MX1_CSPICTRL_XCH (1 << 8) 633 #define MX1_CSPICTRL_ENABLE (1 << 9) 634 #define MX1_CSPICTRL_MASTER (1 << 10) 635 #define MX1_CSPICTRL_DR_SHIFT 13 636 637 static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable) 638 { 639 unsigned int val = 0; 640 641 if (enable & MXC_INT_TE) 642 val |= MX1_INTREG_TEEN; 643 if (enable & MXC_INT_RR) 644 val |= MX1_INTREG_RREN; 645 646 writel(val, spi_imx->base + MXC_CSPIINT); 647 } 648 649 static void mx1_trigger(struct spi_imx_data *spi_imx) 650 { 651 unsigned int reg; 652 653 reg = readl(spi_imx->base + MXC_CSPICTRL); 654 reg |= MX1_CSPICTRL_XCH; 655 writel(reg, spi_imx->base + MXC_CSPICTRL); 656 } 657 658 static int mx1_config(struct spi_device *spi) 659 { 660 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); 661 unsigned 
int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER; 662 unsigned int clk; 663 664 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->speed_hz, &clk) << 665 MX1_CSPICTRL_DR_SHIFT; 666 spi_imx->spi_bus_clk = clk; 667 668 reg |= spi_imx->bits_per_word - 1; 669 670 if (spi->mode & SPI_CPHA) 671 reg |= MX1_CSPICTRL_PHA; 672 if (spi->mode & SPI_CPOL) 673 reg |= MX1_CSPICTRL_POL; 674 675 writel(reg, spi_imx->base + MXC_CSPICTRL); 676 677 return 0; 678 } 679 680 static int mx1_rx_available(struct spi_imx_data *spi_imx) 681 { 682 return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR; 683 } 684 685 static void mx1_reset(struct spi_imx_data *spi_imx) 686 { 687 writel(1, spi_imx->base + MXC_RESET); 688 } 689 690 static struct spi_imx_devtype_data imx1_cspi_devtype_data = { 691 .intctrl = mx1_intctrl, 692 .config = mx1_config, 693 .trigger = mx1_trigger, 694 .rx_available = mx1_rx_available, 695 .reset = mx1_reset, 696 .devtype = IMX1_CSPI, 697 }; 698 699 static struct spi_imx_devtype_data imx21_cspi_devtype_data = { 700 .intctrl = mx21_intctrl, 701 .config = mx21_config, 702 .trigger = mx21_trigger, 703 .rx_available = mx21_rx_available, 704 .reset = mx21_reset, 705 .devtype = IMX21_CSPI, 706 }; 707 708 static struct spi_imx_devtype_data imx27_cspi_devtype_data = { 709 /* i.mx27 cspi shares the functions with i.mx21 one */ 710 .intctrl = mx21_intctrl, 711 .config = mx21_config, 712 .trigger = mx21_trigger, 713 .rx_available = mx21_rx_available, 714 .reset = mx21_reset, 715 .devtype = IMX27_CSPI, 716 }; 717 718 static struct spi_imx_devtype_data imx31_cspi_devtype_data = { 719 .intctrl = mx31_intctrl, 720 .config = mx31_config, 721 .trigger = mx31_trigger, 722 .rx_available = mx31_rx_available, 723 .reset = mx31_reset, 724 .devtype = IMX31_CSPI, 725 }; 726 727 static struct spi_imx_devtype_data imx35_cspi_devtype_data = { 728 /* i.mx35 and later cspi shares the functions with i.mx31 one */ 729 .intctrl = mx31_intctrl, 730 .config = mx31_config, 731 .trigger = mx31_trigger, 
732 .rx_available = mx31_rx_available, 733 .reset = mx31_reset, 734 .devtype = IMX35_CSPI, 735 }; 736 737 static struct spi_imx_devtype_data imx51_ecspi_devtype_data = { 738 .intctrl = mx51_ecspi_intctrl, 739 .config = mx51_ecspi_config, 740 .trigger = mx51_ecspi_trigger, 741 .rx_available = mx51_ecspi_rx_available, 742 .reset = mx51_ecspi_reset, 743 .devtype = IMX51_ECSPI, 744 }; 745 746 static const struct platform_device_id spi_imx_devtype[] = { 747 { 748 .name = "imx1-cspi", 749 .driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data, 750 }, { 751 .name = "imx21-cspi", 752 .driver_data = (kernel_ulong_t) &imx21_cspi_devtype_data, 753 }, { 754 .name = "imx27-cspi", 755 .driver_data = (kernel_ulong_t) &imx27_cspi_devtype_data, 756 }, { 757 .name = "imx31-cspi", 758 .driver_data = (kernel_ulong_t) &imx31_cspi_devtype_data, 759 }, { 760 .name = "imx35-cspi", 761 .driver_data = (kernel_ulong_t) &imx35_cspi_devtype_data, 762 }, { 763 .name = "imx51-ecspi", 764 .driver_data = (kernel_ulong_t) &imx51_ecspi_devtype_data, 765 }, { 766 /* sentinel */ 767 } 768 }; 769 770 static const struct of_device_id spi_imx_dt_ids[] = { 771 { .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, }, 772 { .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, }, 773 { .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, }, 774 { .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, }, 775 { .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, }, 776 { .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, }, 777 { /* sentinel */ } 778 }; 779 MODULE_DEVICE_TABLE(of, spi_imx_dt_ids); 780 781 static void spi_imx_chipselect(struct spi_device *spi, int is_active) 782 { 783 int active = is_active != BITBANG_CS_INACTIVE; 784 int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH); 785 786 if (!gpio_is_valid(spi->cs_gpio)) 787 return; 788 789 gpio_set_value(spi->cs_gpio, dev_is_lowactive ^ active); 790 } 791 792 static void 
spi_imx_push(struct spi_imx_data *spi_imx)
{
	/*
	 * Refill the TX FIFO from the current transfer: push words until
	 * either the FIFO is full or the transfer runs out of data, then
	 * tell the controller to start (or continue) the exchange.
	 */
	while (spi_imx->txfifo < spi_imx_get_fifosize(spi_imx)) {
		if (!spi_imx->count)
			break;
		spi_imx->tx(spi_imx);
		spi_imx->txfifo++;	/* one more word in flight */
	}

	spi_imx->devtype_data->trigger(spi_imx);
}

/*
 * PIO-mode interrupt handler: drain every word the controller has
 * received, then either push more TX data, keep waiting for outstanding
 * RX words, or signal completion of the whole transfer.
 */
static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
	struct spi_imx_data *spi_imx = dev_id;

	/* Read back everything currently available in the RX FIFO. */
	while (spi_imx->devtype_data->rx_available(spi_imx)) {
		spi_imx->rx(spi_imx);
		spi_imx->txfifo--;	/* one in-flight word completed */
	}

	if (spi_imx->count) {
		/* More data to send: top the TX FIFO up again. */
		spi_imx_push(spi_imx);
		return IRQ_HANDLED;
	}

	if (spi_imx->txfifo) {
		/* No data left to push, but still waiting for rx data,
		 * enable receive data available interrupt.
		 */
		spi_imx->devtype_data->intctrl(
				spi_imx, MXC_INT_RR);
		return IRQ_HANDLED;
	}

	/* Everything pushed and received back: transfer is finished. */
	spi_imx->devtype_data->intctrl(spi_imx, 0);
	complete(&spi_imx->xfer_done);

	return IRQ_HANDLED;
}

/*
 * Program both DMA channels for the upcoming transfer: the bus width
 * is derived from the transfer's bits-per-word, the burst size from
 * the watermark level chosen in spi_imx_can_dma().
 */
static int spi_imx_dma_configure(struct spi_master *master)
{
	int ret;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config rx = {}, tx = {};
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
	case 4:
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	case 2:
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case 1:
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	default:
		return -EINVAL;
	}

	/* TX channel: memory -> TXDATA register. */
	tx.direction = DMA_MEM_TO_DEV;
	tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
	tx.dst_addr_width = buswidth;
	tx.dst_maxburst = spi_imx->wml;
	ret = dmaengine_slave_config(master->dma_tx, &tx);
	if (ret) {
		dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
		return ret;
	}

	/* RX channel: RXDATA register -> memory. */
	rx.direction = DMA_DEV_TO_MEM;
	rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
	rx.src_addr_width = buswidth;
	rx.src_maxburst = spi_imx->wml;
	ret =
dmaengine_slave_config(master->dma_rx, &rx); 869 if (ret) { 870 dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret); 871 return ret; 872 } 873 874 return 0; 875 } 876 877 static int spi_imx_setupxfer(struct spi_device *spi, 878 struct spi_transfer *t) 879 { 880 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); 881 int ret; 882 883 if (!t) 884 return 0; 885 886 spi_imx->bits_per_word = t->bits_per_word; 887 spi_imx->speed_hz = t->speed_hz; 888 889 /* Initialize the functions for transfer */ 890 if (spi_imx->bits_per_word <= 8) { 891 spi_imx->rx = spi_imx_buf_rx_u8; 892 spi_imx->tx = spi_imx_buf_tx_u8; 893 } else if (spi_imx->bits_per_word <= 16) { 894 spi_imx->rx = spi_imx_buf_rx_u16; 895 spi_imx->tx = spi_imx_buf_tx_u16; 896 } else { 897 spi_imx->rx = spi_imx_buf_rx_u32; 898 spi_imx->tx = spi_imx_buf_tx_u32; 899 } 900 901 if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t)) 902 spi_imx->usedma = 1; 903 else 904 spi_imx->usedma = 0; 905 906 if (spi_imx->usedma) { 907 ret = spi_imx_dma_configure(spi->master); 908 if (ret) 909 return ret; 910 } 911 912 spi_imx->devtype_data->config(spi); 913 914 return 0; 915 } 916 917 static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx) 918 { 919 struct spi_master *master = spi_imx->bitbang.master; 920 921 if (master->dma_rx) { 922 dma_release_channel(master->dma_rx); 923 master->dma_rx = NULL; 924 } 925 926 if (master->dma_tx) { 927 dma_release_channel(master->dma_tx); 928 master->dma_tx = NULL; 929 } 930 } 931 932 static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx, 933 struct spi_master *master) 934 { 935 int ret; 936 937 /* use pio mode for i.mx6dl chip TKT238285 */ 938 if (of_machine_is_compatible("fsl,imx6dl")) 939 return 0; 940 941 spi_imx->wml = spi_imx_get_fifosize(spi_imx) / 2; 942 943 /* Prepare for TX DMA: */ 944 master->dma_tx = dma_request_slave_channel_reason(dev, "tx"); 945 if (IS_ERR(master->dma_tx)) { 946 ret = PTR_ERR(master->dma_tx); 947 
dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret); 948 master->dma_tx = NULL; 949 goto err; 950 } 951 952 /* Prepare for RX : */ 953 master->dma_rx = dma_request_slave_channel_reason(dev, "rx"); 954 if (IS_ERR(master->dma_rx)) { 955 ret = PTR_ERR(master->dma_rx); 956 dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret); 957 master->dma_rx = NULL; 958 goto err; 959 } 960 961 init_completion(&spi_imx->dma_rx_completion); 962 init_completion(&spi_imx->dma_tx_completion); 963 master->can_dma = spi_imx_can_dma; 964 master->max_dma_len = MAX_SDMA_BD_BYTES; 965 spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX | 966 SPI_MASTER_MUST_TX; 967 968 return 0; 969 err: 970 spi_imx_sdma_exit(spi_imx); 971 return ret; 972 } 973 974 static void spi_imx_dma_rx_callback(void *cookie) 975 { 976 struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie; 977 978 complete(&spi_imx->dma_rx_completion); 979 } 980 981 static void spi_imx_dma_tx_callback(void *cookie) 982 { 983 struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie; 984 985 complete(&spi_imx->dma_tx_completion); 986 } 987 988 static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size) 989 { 990 unsigned long timeout = 0; 991 992 /* Time with actual data transfer and CS change delay related to HW */ 993 timeout = (8 + 4) * size / spi_imx->spi_bus_clk; 994 995 /* Add extra second for scheduler related activities */ 996 timeout += 1; 997 998 /* Double calculated timeout */ 999 return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC); 1000 } 1001 1002 static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, 1003 struct spi_transfer *transfer) 1004 { 1005 struct dma_async_tx_descriptor *desc_tx, *desc_rx; 1006 unsigned long transfer_timeout; 1007 unsigned long timeout; 1008 struct spi_master *master = spi_imx->bitbang.master; 1009 struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg; 1010 1011 /* 1012 * The TX DMA setup starts the transfer, so make sure RX is configured 
1013 * before TX. 1014 */ 1015 desc_rx = dmaengine_prep_slave_sg(master->dma_rx, 1016 rx->sgl, rx->nents, DMA_DEV_TO_MEM, 1017 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1018 if (!desc_rx) 1019 return -EINVAL; 1020 1021 desc_rx->callback = spi_imx_dma_rx_callback; 1022 desc_rx->callback_param = (void *)spi_imx; 1023 dmaengine_submit(desc_rx); 1024 reinit_completion(&spi_imx->dma_rx_completion); 1025 dma_async_issue_pending(master->dma_rx); 1026 1027 desc_tx = dmaengine_prep_slave_sg(master->dma_tx, 1028 tx->sgl, tx->nents, DMA_MEM_TO_DEV, 1029 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1030 if (!desc_tx) { 1031 dmaengine_terminate_all(master->dma_tx); 1032 return -EINVAL; 1033 } 1034 1035 desc_tx->callback = spi_imx_dma_tx_callback; 1036 desc_tx->callback_param = (void *)spi_imx; 1037 dmaengine_submit(desc_tx); 1038 reinit_completion(&spi_imx->dma_tx_completion); 1039 dma_async_issue_pending(master->dma_tx); 1040 1041 transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len); 1042 1043 /* Wait SDMA to finish the data transfer.*/ 1044 timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion, 1045 transfer_timeout); 1046 if (!timeout) { 1047 dev_err(spi_imx->dev, "I/O Error in DMA TX\n"); 1048 dmaengine_terminate_all(master->dma_tx); 1049 dmaengine_terminate_all(master->dma_rx); 1050 return -ETIMEDOUT; 1051 } 1052 1053 timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion, 1054 transfer_timeout); 1055 if (!timeout) { 1056 dev_err(&master->dev, "I/O Error in DMA RX\n"); 1057 spi_imx->devtype_data->reset(spi_imx); 1058 dmaengine_terminate_all(master->dma_rx); 1059 return -ETIMEDOUT; 1060 } 1061 1062 return transfer->len; 1063 } 1064 1065 static int spi_imx_pio_transfer(struct spi_device *spi, 1066 struct spi_transfer *transfer) 1067 { 1068 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); 1069 unsigned long transfer_timeout; 1070 unsigned long timeout; 1071 1072 spi_imx->tx_buf = transfer->tx_buf; 1073 spi_imx->rx_buf = 
transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;	/* no words in flight yet */

	reinit_completion(&spi_imx->xfer_done);

	/* Prime the TX FIFO and kick off the exchange. */
	spi_imx_push(spi_imx);

	/* Subsequent FIFO refills are driven by the TX-empty interrupt. */
	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);

	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

	timeout = wait_for_completion_timeout(&spi_imx->xfer_done,
					      transfer_timeout);
	if (!timeout) {
		dev_err(&spi->dev, "I/O Error in PIO\n");
		/* Clear controller state so the next transfer starts clean. */
		spi_imx->devtype_data->reset(spi_imx);
		return -ETIMEDOUT;
	}

	return transfer->len;
}

/* Dispatch one transfer to the DMA or PIO path chosen in setupxfer. */
static int spi_imx_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

	if (spi_imx->usedma)
		return spi_imx_dma_transfer(spi_imx, transfer);
	else
		return spi_imx_pio_transfer(spi, transfer);
}

static int spi_imx_setup(struct spi_device *spi)
{
	dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
		 spi->mode, spi->bits_per_word, spi->max_speed_hz);

	/* Park a GPIO chip select at its inactive level. */
	if (gpio_is_valid(spi->cs_gpio))
		gpio_direction_output(spi->cs_gpio,
				      spi->mode & SPI_CS_HIGH ?
0 : 1); 1115 1116 spi_imx_chipselect(spi, BITBANG_CS_INACTIVE); 1117 1118 return 0; 1119 } 1120 1121 static void spi_imx_cleanup(struct spi_device *spi) 1122 { 1123 } 1124 1125 static int 1126 spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg) 1127 { 1128 struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 1129 int ret; 1130 1131 ret = clk_enable(spi_imx->clk_per); 1132 if (ret) 1133 return ret; 1134 1135 ret = clk_enable(spi_imx->clk_ipg); 1136 if (ret) { 1137 clk_disable(spi_imx->clk_per); 1138 return ret; 1139 } 1140 1141 return 0; 1142 } 1143 1144 static int 1145 spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg) 1146 { 1147 struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 1148 1149 clk_disable(spi_imx->clk_ipg); 1150 clk_disable(spi_imx->clk_per); 1151 return 0; 1152 } 1153 1154 static int spi_imx_probe(struct platform_device *pdev) 1155 { 1156 struct device_node *np = pdev->dev.of_node; 1157 const struct of_device_id *of_id = 1158 of_match_device(spi_imx_dt_ids, &pdev->dev); 1159 struct spi_imx_master *mxc_platform_info = 1160 dev_get_platdata(&pdev->dev); 1161 struct spi_master *master; 1162 struct spi_imx_data *spi_imx; 1163 struct resource *res; 1164 int i, ret, irq, spi_drctl; 1165 1166 if (!np && !mxc_platform_info) { 1167 dev_err(&pdev->dev, "can't get the platform data\n"); 1168 return -EINVAL; 1169 } 1170 1171 master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data)); 1172 if (!master) 1173 return -ENOMEM; 1174 1175 ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl); 1176 if ((ret < 0) || (spi_drctl >= 0x3)) { 1177 /* '11' is reserved */ 1178 spi_drctl = 0; 1179 } 1180 1181 platform_set_drvdata(pdev, master); 1182 1183 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); 1184 master->bus_num = np ? 
-1 : pdev->id; 1185 1186 spi_imx = spi_master_get_devdata(master); 1187 spi_imx->bitbang.master = master; 1188 spi_imx->dev = &pdev->dev; 1189 1190 spi_imx->devtype_data = of_id ? of_id->data : 1191 (struct spi_imx_devtype_data *)pdev->id_entry->driver_data; 1192 1193 if (mxc_platform_info) { 1194 master->num_chipselect = mxc_platform_info->num_chipselect; 1195 master->cs_gpios = devm_kzalloc(&master->dev, 1196 sizeof(int) * master->num_chipselect, GFP_KERNEL); 1197 if (!master->cs_gpios) 1198 return -ENOMEM; 1199 1200 for (i = 0; i < master->num_chipselect; i++) 1201 master->cs_gpios[i] = mxc_platform_info->chipselect[i]; 1202 } 1203 1204 spi_imx->bitbang.chipselect = spi_imx_chipselect; 1205 spi_imx->bitbang.setup_transfer = spi_imx_setupxfer; 1206 spi_imx->bitbang.txrx_bufs = spi_imx_transfer; 1207 spi_imx->bitbang.master->setup = spi_imx_setup; 1208 spi_imx->bitbang.master->cleanup = spi_imx_cleanup; 1209 spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message; 1210 spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message; 1211 spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 1212 if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx)) 1213 spi_imx->bitbang.master->mode_bits |= SPI_LOOP | SPI_READY; 1214 1215 spi_imx->spi_drctl = spi_drctl; 1216 1217 init_completion(&spi_imx->xfer_done); 1218 1219 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1220 spi_imx->base = devm_ioremap_resource(&pdev->dev, res); 1221 if (IS_ERR(spi_imx->base)) { 1222 ret = PTR_ERR(spi_imx->base); 1223 goto out_master_put; 1224 } 1225 spi_imx->base_phys = res->start; 1226 1227 irq = platform_get_irq(pdev, 0); 1228 if (irq < 0) { 1229 ret = irq; 1230 goto out_master_put; 1231 } 1232 1233 ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0, 1234 dev_name(&pdev->dev), spi_imx); 1235 if (ret) { 1236 dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret); 1237 goto out_master_put; 1238 } 1239 1240 spi_imx->clk_ipg = 
devm_clk_get(&pdev->dev, "ipg"); 1241 if (IS_ERR(spi_imx->clk_ipg)) { 1242 ret = PTR_ERR(spi_imx->clk_ipg); 1243 goto out_master_put; 1244 } 1245 1246 spi_imx->clk_per = devm_clk_get(&pdev->dev, "per"); 1247 if (IS_ERR(spi_imx->clk_per)) { 1248 ret = PTR_ERR(spi_imx->clk_per); 1249 goto out_master_put; 1250 } 1251 1252 ret = clk_prepare_enable(spi_imx->clk_per); 1253 if (ret) 1254 goto out_master_put; 1255 1256 ret = clk_prepare_enable(spi_imx->clk_ipg); 1257 if (ret) 1258 goto out_put_per; 1259 1260 spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per); 1261 /* 1262 * Only validated on i.mx35 and i.mx6 now, can remove the constraint 1263 * if validated on other chips. 1264 */ 1265 if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx)) { 1266 ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master); 1267 if (ret == -EPROBE_DEFER) 1268 goto out_clk_put; 1269 1270 if (ret < 0) 1271 dev_err(&pdev->dev, "dma setup error %d, use pio\n", 1272 ret); 1273 } 1274 1275 spi_imx->devtype_data->reset(spi_imx); 1276 1277 spi_imx->devtype_data->intctrl(spi_imx, 0); 1278 1279 master->dev.of_node = pdev->dev.of_node; 1280 ret = spi_bitbang_start(&spi_imx->bitbang); 1281 if (ret) { 1282 dev_err(&pdev->dev, "bitbang start failed with %d\n", ret); 1283 goto out_clk_put; 1284 } 1285 1286 if (!master->cs_gpios) { 1287 dev_err(&pdev->dev, "No CS GPIOs available\n"); 1288 ret = -EINVAL; 1289 goto out_clk_put; 1290 } 1291 1292 for (i = 0; i < master->num_chipselect; i++) { 1293 if (!gpio_is_valid(master->cs_gpios[i])) 1294 continue; 1295 1296 ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i], 1297 DRIVER_NAME); 1298 if (ret) { 1299 dev_err(&pdev->dev, "Can't get CS GPIO %i\n", 1300 master->cs_gpios[i]); 1301 goto out_clk_put; 1302 } 1303 } 1304 1305 dev_info(&pdev->dev, "probed\n"); 1306 1307 clk_disable(spi_imx->clk_ipg); 1308 clk_disable(spi_imx->clk_per); 1309 return ret; 1310 1311 out_clk_put: 1312 clk_disable_unprepare(spi_imx->clk_ipg); 1313 out_put_per: 1314 
clk_disable_unprepare(spi_imx->clk_per); 1315 out_master_put: 1316 spi_master_put(master); 1317 1318 return ret; 1319 } 1320 1321 static int spi_imx_remove(struct platform_device *pdev) 1322 { 1323 struct spi_master *master = platform_get_drvdata(pdev); 1324 struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 1325 1326 spi_bitbang_stop(&spi_imx->bitbang); 1327 1328 writel(0, spi_imx->base + MXC_CSPICTRL); 1329 clk_unprepare(spi_imx->clk_ipg); 1330 clk_unprepare(spi_imx->clk_per); 1331 spi_imx_sdma_exit(spi_imx); 1332 spi_master_put(master); 1333 1334 return 0; 1335 } 1336 1337 static struct platform_driver spi_imx_driver = { 1338 .driver = { 1339 .name = DRIVER_NAME, 1340 .of_match_table = spi_imx_dt_ids, 1341 }, 1342 .id_table = spi_imx_devtype, 1343 .probe = spi_imx_probe, 1344 .remove = spi_imx_remove, 1345 }; 1346 module_platform_driver(spi_imx_driver); 1347 1348 MODULE_DESCRIPTION("SPI Master Controller driver"); 1349 MODULE_AUTHOR("Sascha Hauer, Pengutronix"); 1350 MODULE_LICENSE("GPL"); 1351 MODULE_ALIAS("platform:" DRIVER_NAME); 1352