/*
 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright (C) 2008 Juergen Beisert
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation
 * 51 Franklin Street, Fifth Floor
 * Boston, MA 02110-1301, USA.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>

#include <linux/platform_data/dma-imx.h>
#include <linux/platform_data/spi-imx.h>

#define DRIVER_NAME "spi_imx"

#define MXC_CSPIRXDATA		0x00
#define MXC_CSPITXDATA		0x04
#define MXC_CSPICTRL		0x08
#define MXC_CSPIINT		0x0c
#define MXC_RESET		0x1c

/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR	(1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE	(1 << 1) /* Transmit FIFO empty interrupt */

/* The maximum number of bytes that an SDMA BD can transfer. */
#define MAX_SDMA_BD_BYTES	(1 << 15)
#define IMX_DMA_TIMEOUT		(msecs_to_jiffies(3000))

struct spi_imx_config {
	unsigned int speed_hz;
	unsigned int bpw;
	unsigned int mode;
	u8 cs;
};

enum spi_imx_devtype {
	IMX1_CSPI,
	IMX21_CSPI,
	IMX27_CSPI,
	IMX31_CSPI,
	IMX35_CSPI,	/* CSPI on all i.mx except above */
	IMX51_ECSPI,	/* ECSPI on i.mx51 and later */
};

struct spi_imx_data;

struct spi_imx_devtype_data {
	void (*intctrl)(struct spi_imx_data *, int);
	int (*config)(struct spi_imx_data *, struct spi_imx_config *);
	void (*trigger)(struct spi_imx_data *);
	int (*rx_available)(struct spi_imx_data *);
	void (*reset)(struct spi_imx_data *);
	enum spi_imx_devtype devtype;
};

struct spi_imx_data {
	struct spi_bitbang bitbang;

	struct completion xfer_done;
	void __iomem *base;
	int irq;
	struct clk *clk_per;
	struct clk *clk_ipg;
	unsigned long spi_clk;

	unsigned int count;
	void (*tx)(struct spi_imx_data *);
	void (*rx)(struct spi_imx_data *);
	void *rx_buf;
	const void *tx_buf;
	unsigned int txfifo; /* number of words pushed in tx FIFO */

	/* DMA */
	unsigned int dma_is_inited;
	unsigned int dma_finished;
	bool usedma;
	u32 rx_wml;
	u32 tx_wml;
	u32 rxt_wml;
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct spi_imx_devtype_data *devtype_data;
	int chipselect[0];
};

static inline int is_imx27_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX27_CSPI;
}

static inline int is_imx35_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX35_CSPI;
}

static inline unsigned spi_imx_get_fifosize(struct spi_imx_data *d)
{
	return (d->devtype_data->devtype == IMX51_ECSPI) ? 64 : 8;
}

#define MXC_SPI_BUF_RX(type)						\
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)		\
{									\
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);	\
									\
	if (spi_imx->rx_buf) {						\
		*(type *)spi_imx->rx_buf = val;				\
		spi_imx->rx_buf += sizeof(type);			\
	}								\
}

#define MXC_SPI_BUF_TX(type)						\
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)		\
{									\
	type val = 0;							\
									\
	if (spi_imx->tx_buf) {						\
		val = *(type *)spi_imx->tx_buf;				\
		spi_imx->tx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->count -= sizeof(type);					\
									\
	writel(val, spi_imx->base + MXC_CSPITXDATA);			\
}

MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)

/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
 * (which is currently not the case in this driver)
 */
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024};

/* MX21, MX27 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
		unsigned int fspi, unsigned int max)
{
	int i;

	for (i = 2; i < max; i++)
		if (fspi * mxc_clkdivs[i] >= fin)
			return i;

	return max;
}

/* MX1, MX31, MX35, MX51 CSPI */
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
		unsigned int fspi)
{
	int i, div = 4;

	for (i = 0; i < 7; i++) {
		if (fspi * div >= fin)
			return i;
		div <<= 1;
	}

	return 7;
}

static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml)
	    && (transfer->len > spi_imx->tx_wml))
		return true;
	return false;
}

#define MX51_ECSPI_CTRL		0x08
#define MX51_ECSPI_CTRL_ENABLE		(1 << 0)
#define MX51_ECSPI_CTRL_XCH		(1 << 2)
#define MX51_ECSPI_CTRL_SMC		(1 << 3)
#define MX51_ECSPI_CTRL_MODE_MASK	(0xf << 4)
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET	8
#define MX51_ECSPI_CTRL_PREDIV_OFFSET	12
#define MX51_ECSPI_CTRL_CS(cs)		((cs) << 18)
#define MX51_ECSPI_CTRL_BL_OFFSET	20

#define MX51_ECSPI_CONFIG	0x0c
#define MX51_ECSPI_CONFIG_SCLKPHA(cs)	(1 << ((cs) + 0))
#define MX51_ECSPI_CONFIG_SCLKPOL(cs)	(1 << ((cs) + 4))
#define MX51_ECSPI_CONFIG_SBBCTRL(cs)	(1 << ((cs) + 8))
#define MX51_ECSPI_CONFIG_SSBPOL(cs)	(1 << ((cs) + 12))
#define MX51_ECSPI_CONFIG_SCLKCTL(cs)	(1 << ((cs) + 20))

#define MX51_ECSPI_INT		0x10
#define MX51_ECSPI_INT_TEEN		(1 << 0)
#define MX51_ECSPI_INT_RREN		(1 << 3)

#define MX51_ECSPI_DMA		0x14
#define MX51_ECSPI_DMA_TX_WML_OFFSET	0
#define MX51_ECSPI_DMA_TX_WML_MASK	0x3F
#define MX51_ECSPI_DMA_RX_WML_OFFSET	16
#define MX51_ECSPI_DMA_RX_WML_MASK	(0x3F << 16)
#define MX51_ECSPI_DMA_RXT_WML_OFFSET	24
#define MX51_ECSPI_DMA_RXT_WML_MASK	(0x3F << 24)

#define MX51_ECSPI_DMA_TEDEN_OFFSET	7
#define MX51_ECSPI_DMA_RXDEN_OFFSET	23
#define MX51_ECSPI_DMA_RXTDEN_OFFSET	31

#define MX51_ECSPI_STAT		0x18
#define MX51_ECSPI_STAT_RR		(1 << 3)

/* MX51 eCSPI */
static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi,
				      unsigned int *fres)
{
	/*
	 * there are two 4-bit dividers, the pre-divider divides by
	 * $pre, the post-divider by 2^$post
	 */
	unsigned int pre, post;

	if (unlikely(fspi > fin))
		return 0;

	post = fls(fin) - fls(fspi);
	if (fin > fspi << post)
		post++;

	/* now we have: (fin <= fspi << post) with post being minimal */

	post = max(4U, post) - 4;
	if (unlikely(post > 0xf)) {
		pr_err("%s: cannot set clock freq: %u (base freq: %u)\n",
				__func__, fspi, fin);
		return 0xff;
	}

	pre = DIV_ROUND_UP(fin, fspi << post) - 1;

	pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
			__func__, fin, fspi, post, pre);

	/* Resulting frequency for the SCLK line. */
	*fres = (fin / (pre + 1)) >> post;

	return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
		(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}

static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned val = 0;

	if (enable & MXC_INT_TE)
		val |= MX51_ECSPI_INT_TEEN;

	if (enable & MXC_INT_RR)
		val |= MX51_ECSPI_INT_RREN;

	writel(val, spi_imx->base + MX51_ECSPI_INT);
}

static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
	u32 reg = readl(spi_imx->base + MX51_ECSPI_CTRL);

	if (!spi_imx->usedma)
		reg |= MX51_ECSPI_CTRL_XCH;
	else if (!spi_imx->dma_finished)
		reg |= MX51_ECSPI_CTRL_SMC;
	else
		reg &= ~MX51_ECSPI_CTRL_SMC;
	writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}

static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0, dma = 0;
	u32 tx_wml_cfg, rx_wml_cfg, rxt_wml_cfg;
	u32 clk = config->speed_hz, delay;

	/*
	 * The hardware seems to have a race condition when changing modes. The
	 * current assumption is that the selection of the channel arrives
	 * earlier in the hardware than the mode bits when they are written at
	 * the same time.
	 * So set master mode for all channels as we do not support slave mode.
	 */
	ctrl |= MX51_ECSPI_CTRL_MODE_MASK;

	/* set clock speed */
	ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz, &clk);

	/* set chip select to use */
	ctrl |= MX51_ECSPI_CTRL_CS(config->cs);

	ctrl |= (config->bpw - 1) << MX51_ECSPI_CTRL_BL_OFFSET;

	cfg |= MX51_ECSPI_CONFIG_SBBCTRL(config->cs);

	if (config->mode & SPI_CPHA)
		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);

	if (config->mode & SPI_CPOL) {
		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
	}
	if (config->mode & SPI_CS_HIGH)
		cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);

	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);

	/*
	 * Wait until the changes in the configuration register CONFIGREG
	 * propagate into the hardware. It takes exactly one tick of the
	 * SCLK clock, but we will wait two SCLK clocks just to be sure. The
	 * effect of the delay it takes for the hardware to apply changes
	 * is noticeable if the SCLK clock runs very slowly.
	 * In such a case, if the polarity of SCLK should be inverted,
	 * the GPIO ChipSelect might be asserted before the SCLK polarity
	 * changes, which would disrupt the SPI communication as the device
	 * on the other end would consider the change of SCLK polarity
	 * as a clock tick already.
	 */
	delay = (2 * 1000000) / clk;
	if (likely(delay < 10))	/* SCLK is faster than 100 kHz */
		udelay(delay);
	else			/* SCLK is _very_ slow */
		usleep_range(delay, delay + 10);

	/*
	 * Configure the DMA register: setup the watermark
	 * and enable DMA request.
	 */
	if (spi_imx->dma_is_inited) {
		dma = readl(spi_imx->base + MX51_ECSPI_DMA);

		spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
		spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
		spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2;
		rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
		tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
		rxt_wml_cfg = spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET;
		dma = (dma & ~MX51_ECSPI_DMA_TX_WML_MASK
			   & ~MX51_ECSPI_DMA_RX_WML_MASK
			   & ~MX51_ECSPI_DMA_RXT_WML_MASK)
			   | rx_wml_cfg | tx_wml_cfg | rxt_wml_cfg
			   | (1 << MX51_ECSPI_DMA_TEDEN_OFFSET)
			   | (1 << MX51_ECSPI_DMA_RXDEN_OFFSET)
			   | (1 << MX51_ECSPI_DMA_RXTDEN_OFFSET);

		writel(dma, spi_imx->base + MX51_ECSPI_DMA);
	}

	return 0;
}

static int __maybe_unused mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
}

static void __maybe_unused mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (mx51_ecspi_rx_available(spi_imx))
		readl(spi_imx->base + MXC_CSPIRXDATA);
}

#define MX31_INTREG_TEEN	(1 << 0)
#define MX31_INTREG_RREN	(1 << 3)

#define MX31_CSPICTRL_ENABLE	(1 << 0)
#define MX31_CSPICTRL_MASTER	(1 << 1)
#define MX31_CSPICTRL_XCH	(1 << 2)
#define MX31_CSPICTRL_POL	(1 << 4)
#define MX31_CSPICTRL_PHA	(1 << 5)
#define MX31_CSPICTRL_SSCTL	(1 << 6)
#define MX31_CSPICTRL_SSPOL	(1 << 7)
#define MX31_CSPICTRL_BC_SHIFT	8
#define MX35_CSPICTRL_BL_SHIFT	20
#define MX31_CSPICTRL_CS_SHIFT	24
#define MX35_CSPICTRL_CS_SHIFT	12
#define MX31_CSPICTRL_DR_SHIFT	16

#define MX31_CSPISTATUS		0x14
#define MX31_STATUS_RR		(1 << 3)

/* These functions also work for the i.MX35, but be aware that
 * the i.MX35 has a slightly different register layout for bits
 * we do not use here.
 */
static void __maybe_unused mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX31_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX31_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX31_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int __maybe_unused mx31_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
	int cs = spi_imx->chipselect[config->cs];

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
		MX31_CSPICTRL_DR_SHIFT;

	if (is_imx35_cspi(spi_imx)) {
		reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
		reg |= MX31_CSPICTRL_SSCTL;
	} else {
		reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
	}

	if (config->mode & SPI_CPHA)
		reg |= MX31_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX31_CSPICTRL_POL;
	if (config->mode & SPI_CS_HIGH)
		reg |= MX31_CSPICTRL_SSPOL;
	if (cs < 0)
		reg |= (cs + 32) <<
			(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
						  MX31_CSPICTRL_CS_SHIFT);

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
}

static void __maybe_unused mx31_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
		readl(spi_imx->base + MXC_CSPIRXDATA);
}

#define MX21_INTREG_RR		(1 << 4)
#define MX21_INTREG_TEEN	(1 << 9)
#define MX21_INTREG_RREN	(1 << 13)

#define MX21_CSPICTRL_POL	(1 << 5)
#define MX21_CSPICTRL_PHA	(1 << 6)
#define MX21_CSPICTRL_SSPOL	(1 << 8)
#define MX21_CSPICTRL_XCH	(1 << 9)
#define MX21_CSPICTRL_ENABLE	(1 << 10)
#define MX21_CSPICTRL_MASTER	(1 << 11)
#define MX21_CSPICTRL_DR_SHIFT	14
#define MX21_CSPICTRL_CS_SHIFT	19

static void __maybe_unused mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX21_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX21_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void __maybe_unused mx21_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX21_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int __maybe_unused mx21_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
	int cs = spi_imx->chipselect[config->cs];
	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;

	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) <<
		MX21_CSPICTRL_DR_SHIFT;
	reg |= config->bpw - 1;

	if (config->mode & SPI_CPHA)
		reg |= MX21_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX21_CSPICTRL_POL;
	if (config->mode & SPI_CS_HIGH)
		reg |= MX21_CSPICTRL_SSPOL;
	if (cs < 0)
		reg |= (cs + 32) << MX21_CSPICTRL_CS_SHIFT;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int __maybe_unused mx21_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
}

static void __maybe_unused mx21_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}

#define MX1_INTREG_RR		(1 << 3)
#define MX1_INTREG_TEEN		(1 << 8)
#define MX1_INTREG_RREN		(1 << 11)

#define MX1_CSPICTRL_POL	(1 << 4)
#define MX1_CSPICTRL_PHA	(1 << 5)
#define MX1_CSPICTRL_XCH	(1 << 8)
#define MX1_CSPICTRL_ENABLE	(1 << 9)
#define MX1_CSPICTRL_MASTER	(1 << 10)
#define MX1_CSPICTRL_DR_SHIFT	13

static void __maybe_unused mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX1_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX1_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void __maybe_unused mx1_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX1_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int __maybe_unused mx1_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
		MX1_CSPICTRL_DR_SHIFT;
	reg |= config->bpw - 1;

	if (config->mode & SPI_CPHA)
		reg |= MX1_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX1_CSPICTRL_POL;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int __maybe_unused mx1_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
}

static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}

static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
	.intctrl = mx1_intctrl,
	.config = mx1_config,
	.trigger = mx1_trigger,
	.rx_available = mx1_rx_available,
	.reset = mx1_reset,
	.devtype = IMX1_CSPI,
};

static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
	.intctrl = mx21_intctrl,
	.config = mx21_config,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.devtype = IMX21_CSPI,
};

static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
	/* the i.MX27 CSPI shares the functions with the i.MX21 one */
	.intctrl = mx21_intctrl,
	.config = mx21_config,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.devtype = IMX27_CSPI,
};

static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
	.intctrl = mx31_intctrl,
	.config = mx31_config,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.devtype = IMX31_CSPI,
};

static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
	/* the i.MX35 and later CSPI shares the functions with the i.MX31 one */
	.intctrl = mx31_intctrl,
	.config = mx31_config,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.devtype = IMX35_CSPI,
};

static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.config = mx51_ecspi_config,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.devtype = IMX51_ECSPI,
};

static struct platform_device_id spi_imx_devtype[] = {
	{
		.name = "imx1-cspi",
		.driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data,
	}, {
		.name = "imx21-cspi",
		.driver_data = (kernel_ulong_t) &imx21_cspi_devtype_data,
	}, {
		.name = "imx27-cspi",
		.driver_data = (kernel_ulong_t) &imx27_cspi_devtype_data,
	}, {
		.name = "imx31-cspi",
		.driver_data = (kernel_ulong_t) &imx31_cspi_devtype_data,
	}, {
		.name = "imx35-cspi",
		.driver_data = (kernel_ulong_t) &imx35_cspi_devtype_data,
	}, {
		.name = "imx51-ecspi",
		.driver_data = (kernel_ulong_t) &imx51_ecspi_devtype_data,
	}, {
		/* sentinel */
	}
};

static const struct of_device_id spi_imx_dt_ids[] = {
	{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
	{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
	{ .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
	{ .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
	{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
	{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);

static void spi_imx_chipselect(struct spi_device *spi, int is_active)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	int gpio = spi_imx->chipselect[spi->chip_select];
	int active = is_active != BITBANG_CS_INACTIVE;
	int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);

	if (!gpio_is_valid(gpio))
		return;

	gpio_set_value(gpio, dev_is_lowactive ^ active);
}

static void spi_imx_push(struct spi_imx_data *spi_imx)
{
	while (spi_imx->txfifo < spi_imx_get_fifosize(spi_imx)) {
		if (!spi_imx->count)
			break;
		spi_imx->tx(spi_imx);
		spi_imx->txfifo++;
	}

	spi_imx->devtype_data->trigger(spi_imx);
}

static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
	struct spi_imx_data *spi_imx = dev_id;

	while (spi_imx->devtype_data->rx_available(spi_imx)) {
		spi_imx->rx(spi_imx);
		spi_imx->txfifo--;
	}

	if (spi_imx->count) {
		spi_imx_push(spi_imx);
		return IRQ_HANDLED;
	}

	if (spi_imx->txfifo) {
		/* No data left to push, but still waiting for rx data,
		 * enable receive data available interrupt.
		 */
		spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_RR);
		return IRQ_HANDLED;
	}

	spi_imx->devtype_data->intctrl(spi_imx, 0);
	complete(&spi_imx->xfer_done);

	return IRQ_HANDLED;
}

static int spi_imx_setupxfer(struct spi_device *spi,
				 struct spi_transfer *t)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	struct spi_imx_config config;

	config.bpw = t ? t->bits_per_word : spi->bits_per_word;
	config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
	config.mode = spi->mode;
	config.cs = spi->chip_select;

	if (!config.speed_hz)
		config.speed_hz = spi->max_speed_hz;
	if (!config.bpw)
		config.bpw = spi->bits_per_word;

	/* Initialize the functions for transfer */
	if (config.bpw <= 8) {
		spi_imx->rx = spi_imx_buf_rx_u8;
		spi_imx->tx = spi_imx_buf_tx_u8;
	} else if (config.bpw <= 16) {
		spi_imx->rx = spi_imx_buf_rx_u16;
		spi_imx->tx = spi_imx_buf_tx_u16;
	} else {
		spi_imx->rx = spi_imx_buf_rx_u32;
		spi_imx->tx = spi_imx_buf_tx_u32;
	}

	spi_imx->devtype_data->config(spi_imx, &config);

	return 0;
}

static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
{
	struct spi_master *master = spi_imx->bitbang.master;

	if (master->dma_rx) {
		dma_release_channel(master->dma_rx);
		master->dma_rx = NULL;
	}

	if (master->dma_tx) {
		dma_release_channel(master->dma_tx);
		master->dma_tx = NULL;
	}

	spi_imx->dma_is_inited = 0;
}

static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
			     struct spi_master *master,
			     const struct resource *res)
{
	struct dma_slave_config slave_config = {};
	int ret;

	/* Prepare for TX DMA: */
	master->dma_tx = dma_request_slave_channel(dev, "tx");
	if (!master->dma_tx) {
		dev_err(dev, "cannot get the TX DMA channel!\n");
		ret = -EINVAL;
		goto err;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	slave_config.dst_addr = res->start + MXC_CSPITXDATA;
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.dst_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
	ret = dmaengine_slave_config(master->dma_tx, &slave_config);
	if (ret) {
		dev_err(dev, "error in TX dma configuration.\n");
		goto err;
	}

	/* Prepare for RX DMA: */
	master->dma_rx = dma_request_slave_channel(dev, "rx");
	if (!master->dma_rx) {
		dev_dbg(dev, "cannot get the RX DMA channel.\n");
		ret = -EINVAL;
		goto err;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	slave_config.src_addr = res->start + MXC_CSPIRXDATA;
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.src_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
	ret = dmaengine_slave_config(master->dma_rx, &slave_config);
	if (ret) {
		dev_err(dev, "error in RX dma configuration.\n");
		goto err;
	}

	init_completion(&spi_imx->dma_rx_completion);
	init_completion(&spi_imx->dma_tx_completion);
	master->can_dma = spi_imx_can_dma;
	master->max_dma_len = MAX_SDMA_BD_BYTES;
	spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
					 SPI_MASTER_MUST_TX;
	spi_imx->dma_is_inited = 1;

	return 0;
err:
	spi_imx_sdma_exit(spi_imx);
	return ret;
}

static void spi_imx_dma_rx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

	complete(&spi_imx->dma_rx_completion);
}

static void spi_imx_dma_tx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

	complete(&spi_imx->dma_tx_completion);
}

static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
				struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
	int ret;
	u32 dma;
	int left;
	struct spi_master *master = spi_imx->bitbang.master;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
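
	/*
	 * Queue slave-sg descriptors for both directions before the
	 * transfer is triggered below; if either descriptor cannot be
	 * prepared, bail out via no_dma so the caller falls back to PIO.
	 */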
	if (tx) {
		desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
					tx->sgl, tx->nents, DMA_TO_DEVICE,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_tx)
			goto no_dma;

		desc_tx->callback = spi_imx_dma_tx_callback;
		desc_tx->callback_param = (void *)spi_imx;
		dmaengine_submit(desc_tx);
	}

	if (rx) {
		desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
					rx->sgl, rx->nents, DMA_FROM_DEVICE,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_rx)
			goto no_dma;

		desc_rx->callback = spi_imx_dma_rx_callback;
		desc_rx->callback_param = (void *)spi_imx;
		dmaengine_submit(desc_rx);
	}

	reinit_completion(&spi_imx->dma_rx_completion);
	reinit_completion(&spi_imx->dma_tx_completion);

	/* Trigger the cspi module. */
	spi_imx->dma_finished = 0;

	dma = readl(spi_imx->base + MX51_ECSPI_DMA);
	dma = dma & (~MX51_ECSPI_DMA_RXT_WML_MASK);
	/* Lower the RX watermark so the DMA also fetches the tail data */
	left = transfer->len % spi_imx->rxt_wml;
	if (left)
		writel(dma | (left << MX51_ECSPI_DMA_RXT_WML_OFFSET),
				spi_imx->base + MX51_ECSPI_DMA);
	spi_imx->devtype_data->trigger(spi_imx);

	dma_async_issue_pending(master->dma_tx);
	dma_async_issue_pending(master->dma_rx);
	/* Wait for SDMA to finish the data transfer. */
	ret = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
						IMX_DMA_TIMEOUT);
	if (!ret) {
		pr_warn("%s %s: I/O Error in DMA TX\n",
			dev_driver_string(&master->dev),
			dev_name(&master->dev));
		dmaengine_terminate_all(master->dma_tx);
	} else {
		ret = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
				IMX_DMA_TIMEOUT);
		if (!ret) {
			pr_warn("%s %s: I/O Error in DMA RX\n",
				dev_driver_string(&master->dev),
				dev_name(&master->dev));
			spi_imx->devtype_data->reset(spi_imx);
			dmaengine_terminate_all(master->dma_rx);
		}
		writel(dma |
		       spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET,
		       spi_imx->base + MX51_ECSPI_DMA);
	}

	spi_imx->dma_finished = 1;
	spi_imx->devtype_data->trigger(spi_imx);

	if (!ret)
		ret = -ETIMEDOUT;
	else if (ret > 0)
		ret = transfer->len;

	return ret;

no_dma:
	pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
		     dev_driver_string(&master->dev),
		     dev_name(&master->dev));
	return -EAGAIN;
}

static int spi_imx_pio_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

	spi_imx->tx_buf = transfer->tx_buf;
	spi_imx->rx_buf = transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;

	reinit_completion(&spi_imx->xfer_done);

	spi_imx_push(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);

	wait_for_completion(&spi_imx->xfer_done);

	return transfer->len;
}

static int spi_imx_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	int ret;
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

	if (spi_imx->bitbang.master->can_dma &&
	    spi_imx_can_dma(spi_imx->bitbang.master, spi, transfer)) {
		spi_imx->usedma = true;
		ret = spi_imx_dma_transfer(spi_imx, transfer);
		if (ret != -EAGAIN)
			return ret;
	}
	spi_imx->usedma = false;

	return spi_imx_pio_transfer(spi, transfer);
}

static int spi_imx_setup(struct spi_device *spi)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	int gpio = spi_imx->chipselect[spi->chip_select];

	dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
		 spi->mode, spi->bits_per_word, spi->max_speed_hz);

	if (gpio_is_valid(gpio))
		gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);

	spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);

	return 0;
}

static void spi_imx_cleanup(struct spi_device *spi)
{
}

static int
spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
	int ret;

	ret = clk_enable(spi_imx->clk_per);
	if (ret)
		return ret;

	ret = clk_enable(spi_imx->clk_ipg);
	if (ret) {
		clk_disable(spi_imx->clk_per);
		return ret;
	}

	return 0;
}

static int
spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	clk_disable(spi_imx->clk_ipg);
	clk_disable(spi_imx->clk_per);
	return 0;
}

static int spi_imx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(spi_imx_dt_ids, &pdev->dev);
	struct spi_imx_master *mxc_platform_info =
			dev_get_platdata(&pdev->dev);
	struct spi_master *master;
	struct spi_imx_data *spi_imx;
	struct resource *res;
	int i, ret, num_cs;

	if (!np && !mxc_platform_info) {
		dev_err(&pdev->dev, "can't get the platform data\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(np, "fsl,spi-num-chipselects", &num_cs);
	if (ret < 0) {
		if (mxc_platform_info)
			num_cs = mxc_platform_info->num_chipselect;
		else
			return ret;
	}

	master = spi_alloc_master(&pdev->dev,
			sizeof(struct spi_imx_data) + sizeof(int) * num_cs);
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	master->bus_num = pdev->id;
	master->num_chipselect = num_cs;

	spi_imx = spi_master_get_devdata(master);
	spi_imx->bitbang.master = master;

	for (i = 0; i < master->num_chipselect; i++) {
		int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
		if (!gpio_is_valid(cs_gpio) && mxc_platform_info)
			cs_gpio = mxc_platform_info->chipselect[i];

		spi_imx->chipselect[i] = cs_gpio;
		if (!gpio_is_valid(cs_gpio))
			continue;

		ret = devm_gpio_request(&pdev->dev, spi_imx->chipselect[i],
					DRIVER_NAME);
		if (ret) {
			dev_err(&pdev->dev, "can't get cs gpios\n");
			goto out_master_put;
		}
	}

	spi_imx->bitbang.chipselect = spi_imx_chipselect;
	spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
	spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
	spi_imx->bitbang.master->setup = spi_imx_setup;
	spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
	spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
	spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
	spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	init_completion(&spi_imx->xfer_done);

	spi_imx->devtype_data = of_id ? of_id->data :
		(struct spi_imx_devtype_data *)pdev->id_entry->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(spi_imx->base)) {
		ret = PTR_ERR(spi_imx->base);
		goto out_master_put;
	}

	spi_imx->irq = platform_get_irq(pdev, 0);
	if (spi_imx->irq < 0) {
		ret = spi_imx->irq;
		goto out_master_put;
	}

	ret = devm_request_irq(&pdev->dev, spi_imx->irq, spi_imx_isr, 0,
			       dev_name(&pdev->dev), spi_imx);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret);
		goto out_master_put;
	}

	spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(spi_imx->clk_ipg)) {
		ret = PTR_ERR(spi_imx->clk_ipg);
		goto out_master_put;
	}

	spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(spi_imx->clk_per)) {
		ret = PTR_ERR(spi_imx->clk_per);
		goto out_master_put;
	}

	ret = clk_prepare_enable(spi_imx->clk_per);
	if (ret)
		goto out_master_put;

	ret = clk_prepare_enable(spi_imx->clk_ipg);
	if (ret)
		goto out_put_per;

	spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
	/*
	 * Only validated on i.mx6 now, can remove the constraint if
	 * validated on other chips.
	 */
	if (spi_imx->devtype_data == &imx51_ecspi_devtype_data
	    && spi_imx_sdma_init(&pdev->dev, spi_imx, master, res))
		dev_err(&pdev->dev, "dma setup error, use pio instead\n");

	spi_imx->devtype_data->reset(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, 0);

	master->dev.of_node = pdev->dev.of_node;
	ret = spi_bitbang_start(&spi_imx->bitbang);
	if (ret) {
		dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
		goto out_clk_put;
	}

	dev_info(&pdev->dev, "probed\n");

	clk_disable(spi_imx->clk_ipg);
	clk_disable(spi_imx->clk_per);
	return ret;

out_clk_put:
	clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
	clk_disable_unprepare(spi_imx->clk_per);
out_master_put:
	spi_master_put(master);

	return ret;
}

static int spi_imx_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	spi_bitbang_stop(&spi_imx->bitbang);

	writel(0, spi_imx->base + MXC_CSPICTRL);
	clk_unprepare(spi_imx->clk_ipg);
	clk_unprepare(spi_imx->clk_per);
	spi_imx_sdma_exit(spi_imx);
	spi_master_put(master);

	return 0;
}

static struct platform_driver spi_imx_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = spi_imx_dt_ids,
	},
	.id_table = spi_imx_devtype,
	.probe = spi_imx_probe,
	.remove = spi_imx_remove,
};
module_platform_driver(spi_imx_driver);

MODULE_DESCRIPTION("SPI Master Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);