/*
 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright (C) 2008 Juergen Beisert
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation
 * 51 Franklin Street, Fifth Floor
 * Boston, MA  02110-1301, USA.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>

#include <linux/platform_data/dma-imx.h>
#include <linux/platform_data/spi-imx.h>

#define DRIVER_NAME "spi_imx"

#define MXC_CSPIRXDATA		0x00
#define MXC_CSPITXDATA		0x04
#define MXC_CSPICTRL		0x08
#define MXC_CSPIINT		0x0c
#define MXC_RESET		0x1c

/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR	(1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE	(1 << 1) /* Transmit FIFO empty interrupt */

/* The maximum bytes that a sdma BD can transfer. */
#define MAX_SDMA_BD_BYTES	(1 << 15)
#define IMX_DMA_TIMEOUT		(msecs_to_jiffies(3000))

struct spi_imx_config {
	unsigned int speed_hz;
	unsigned int bpw;
	unsigned int mode;
	u8 cs;
};

enum spi_imx_devtype {
	IMX1_CSPI,
	IMX21_CSPI,
	IMX27_CSPI,
	IMX31_CSPI,
	IMX35_CSPI,	/* CSPI on all i.mx except above */
	IMX51_ECSPI,	/* ECSPI on i.mx51 and later */
};

struct spi_imx_data;

struct spi_imx_devtype_data {
	void (*intctrl)(struct spi_imx_data *, int);
	int (*config)(struct spi_imx_data *, struct spi_imx_config *);
	void (*trigger)(struct spi_imx_data *);
	int (*rx_available)(struct spi_imx_data *);
	void (*reset)(struct spi_imx_data *);
	enum spi_imx_devtype devtype;
};

struct spi_imx_data {
	struct spi_bitbang bitbang;

	struct completion xfer_done;
	void __iomem *base;
	struct clk *clk_per;
	struct clk *clk_ipg;
	unsigned long spi_clk;

	unsigned int count;
	void (*tx)(struct spi_imx_data *);
	void (*rx)(struct spi_imx_data *);
	void *rx_buf;
	const void *tx_buf;
	unsigned int txfifo; /* number of words pushed in tx FIFO */

	/* DMA */
	unsigned int dma_is_inited;
	unsigned int dma_finished;
	bool usedma;
	u32 rx_wml;
	u32 tx_wml;
	u32 rxt_wml;
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct spi_imx_devtype_data *devtype_data;
	int chipselect[0];
};
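
/*
 * chipselect[] is a zero-length array: spi_imx_probe() sizes the
 * spi_alloc_master() allocation as sizeof(struct spi_imx_data) +
 * sizeof(int) * num_cs, so there is one entry per chip select holding
 * either a GPIO number or a negative native chip-select code.
 */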

static inline int is_imx27_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX27_CSPI;
}

static inline int is_imx35_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX35_CSPI;
}

static inline unsigned spi_imx_get_fifosize(struct spi_imx_data *d)
{
	return (d->devtype_data->devtype == IMX51_ECSPI) ? 64 : 8;
}

#define MXC_SPI_BUF_RX(type)						\
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)		\
{									\
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);	\
									\
	if (spi_imx->rx_buf) {						\
		*(type *)spi_imx->rx_buf = val;				\
		spi_imx->rx_buf += sizeof(type);			\
	}								\
}

#define MXC_SPI_BUF_TX(type)						\
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)		\
{									\
	type val = 0;							\
									\
	if (spi_imx->tx_buf) {						\
		val = *(type *)spi_imx->tx_buf;				\
		spi_imx->tx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->count -= sizeof(type);					\
									\
	writel(val, spi_imx->base + MXC_CSPITXDATA);			\
}

MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)

/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
 * (which is currently not the case in this driver)
 */
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024};

/* MX21, MX27 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
		unsigned int fspi, unsigned int max)
{
	int i;

	for (i = 2; i < max; i++)
		if (fspi * mxc_clkdivs[i] >= fin)
			return i;

	return max;
}

/* MX1, MX31, MX35, MX51 CSPI */
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
		unsigned int fspi)
{
	int i, div = 4;

	for (i = 0; i < 7; i++) {
		if (fspi * div >= fin)
			return i;
		div <<= 1;
	}

	return 7;
}

static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	if (spi_imx->dma_is_inited
	    && transfer->len > spi_imx->rx_wml * sizeof(u32)
	    && transfer->len > spi_imx->tx_wml * sizeof(u32))
		return true;
	return false;
}
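
/*
 * DMA is attempted only when the transfer is longer than both watermark
 * levels; rx_wml/tx_wml are counted in 32-bit FIFO words, hence the
 * sizeof(u32) scaling. Anything shorter is handled in PIO mode.
 */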

#define MX51_ECSPI_CTRL		0x08
#define MX51_ECSPI_CTRL_ENABLE		(1 << 0)
#define MX51_ECSPI_CTRL_XCH		(1 << 2)
#define MX51_ECSPI_CTRL_SMC		(1 << 3)
#define MX51_ECSPI_CTRL_MODE_MASK	(0xf << 4)
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET	8
#define MX51_ECSPI_CTRL_PREDIV_OFFSET	12
#define MX51_ECSPI_CTRL_CS(cs)		((cs) << 18)
#define MX51_ECSPI_CTRL_BL_OFFSET	20

#define MX51_ECSPI_CONFIG	0x0c
#define MX51_ECSPI_CONFIG_SCLKPHA(cs)	(1 << ((cs) +  0))
#define MX51_ECSPI_CONFIG_SCLKPOL(cs)	(1 << ((cs) +  4))
#define MX51_ECSPI_CONFIG_SBBCTRL(cs)	(1 << ((cs) +  8))
#define MX51_ECSPI_CONFIG_SSBPOL(cs)	(1 << ((cs) + 12))
#define MX51_ECSPI_CONFIG_SCLKCTL(cs)	(1 << ((cs) + 20))

#define MX51_ECSPI_INT		0x10
#define MX51_ECSPI_INT_TEEN		(1 << 0)
#define MX51_ECSPI_INT_RREN		(1 << 3)

#define MX51_ECSPI_DMA		0x14
#define MX51_ECSPI_DMA_TX_WML_OFFSET	0
#define MX51_ECSPI_DMA_TX_WML_MASK	0x3F
#define MX51_ECSPI_DMA_RX_WML_OFFSET	16
#define MX51_ECSPI_DMA_RX_WML_MASK	(0x3F << 16)
#define MX51_ECSPI_DMA_RXT_WML_OFFSET	24
#define MX51_ECSPI_DMA_RXT_WML_MASK	(0x3F << 24)

#define MX51_ECSPI_DMA_TEDEN_OFFSET	7
#define MX51_ECSPI_DMA_RXDEN_OFFSET	23
#define MX51_ECSPI_DMA_RXTDEN_OFFSET	31

#define MX51_ECSPI_STAT		0x18
#define MX51_ECSPI_STAT_RR		(1 << 3)

/* MX51 eCSPI */
static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi,
				      unsigned int *fres)
{
	/*
	 * there are two 4-bit dividers, the pre-divider divides by
	 * $pre, the post-divider by 2^$post
	 */
	unsigned int pre, post;

	if (unlikely(fspi > fin))
		return 0;

	post = fls(fin) - fls(fspi);
	if (fin > fspi << post)
		post++;

	/* now we have: (fin <= fspi << post) with post being minimal */

	post = max(4U, post) - 4;
	if (unlikely(post > 0xf)) {
		pr_err("%s: cannot set clock freq: %u (base freq: %u)\n",
				__func__, fspi, fin);
		return 0xff;
	}

	pre = DIV_ROUND_UP(fin, fspi << post) - 1;

	pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
			__func__, fin, fspi, post, pre);

	/* Resulting frequency for the SCLK line. */
	*fres = (fin / (pre + 1)) >> post;

	return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
		(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
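
/*
 * Worked example: with fin = 60 MHz and fspi = 1 MHz,
 * post = fls(60000000) - fls(1000000) = 26 - 20 = 6 (no correction, since
 * 60 MHz <= 1 MHz << 6), then post = max(4, 6) - 4 = 2 and
 * pre = DIV_ROUND_UP(60000000, 1000000 << 2) - 1 = 14, giving
 * *fres = (60000000 / 15) >> 2 = 1000000, i.e. exactly the requested rate.
 */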

static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned val = 0;

	if (enable & MXC_INT_TE)
		val |= MX51_ECSPI_INT_TEEN;

	if (enable & MXC_INT_RR)
		val |= MX51_ECSPI_INT_RREN;

	writel(val, spi_imx->base + MX51_ECSPI_INT);
}

static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
	u32 reg = readl(spi_imx->base + MX51_ECSPI_CTRL);

	if (!spi_imx->usedma)
		reg |= MX51_ECSPI_CTRL_XCH;
	else if (!spi_imx->dma_finished)
		reg |= MX51_ECSPI_CTRL_SMC;
	else
		reg &= ~MX51_ECSPI_CTRL_SMC;
	writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}

static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0, dma = 0;
	u32 tx_wml_cfg, rx_wml_cfg, rxt_wml_cfg;
	u32 clk = config->speed_hz, delay;

	/*
	 * The hardware seems to have a race condition when changing modes. The
	 * current assumption is that the selection of the channel arrives
	 * earlier in the hardware than the mode bits when they are written at
	 * the same time.
	 * So set master mode for all channels as we do not support slave mode.
	 */
	ctrl |= MX51_ECSPI_CTRL_MODE_MASK;

	/* set clock speed */
	ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz, &clk);

	/* set chip select to use */
	ctrl |= MX51_ECSPI_CTRL_CS(config->cs);

	ctrl |= (config->bpw - 1) << MX51_ECSPI_CTRL_BL_OFFSET;

	cfg |= MX51_ECSPI_CONFIG_SBBCTRL(config->cs);

	if (config->mode & SPI_CPHA)
		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);

	if (config->mode & SPI_CPOL) {
		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
	}
	if (config->mode & SPI_CS_HIGH)
		cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);

	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);

	/*
	 * Wait until the changes in the configuration register CONFIGREG
	 * propagate into the hardware. It takes exactly one tick of the
	 * SCLK clock, but we will wait two SCLK clocks just to be sure. The
	 * effect of the delay it takes for the hardware to apply changes
	 * is noticeable if the SCLK clock runs very slowly. In such a case, if
	 * the polarity of SCLK should be inverted, the GPIO ChipSelect might
	 * be asserted before the SCLK polarity changes, which would disrupt
	 * the SPI communication as the device on the other end would consider
	 * the change of SCLK polarity as a clock tick already.
	 */
	delay = (2 * 1000000) / clk;
	if (likely(delay < 10))	/* SCLK is faster than 200 kHz */
		udelay(delay);
	else			/* SCLK is _very_ slow */
		usleep_range(delay, delay + 10);

	/*
	 * Configure the DMA register: setup the watermark
	 * and enable DMA request.
	 */
	if (spi_imx->dma_is_inited) {
		dma = readl(spi_imx->base + MX51_ECSPI_DMA);

		spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2;
		rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
		tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
		rxt_wml_cfg = spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET;
		dma = (dma & ~MX51_ECSPI_DMA_TX_WML_MASK
			   & ~MX51_ECSPI_DMA_RX_WML_MASK
			   & ~MX51_ECSPI_DMA_RXT_WML_MASK)
			   | rx_wml_cfg | tx_wml_cfg | rxt_wml_cfg
			   | (1 << MX51_ECSPI_DMA_TEDEN_OFFSET)
			   | (1 << MX51_ECSPI_DMA_RXDEN_OFFSET)
			   | (1 << MX51_ECSPI_DMA_RXTDEN_OFFSET);

		writel(dma, spi_imx->base + MX51_ECSPI_DMA);
	}

	return 0;
}

static int __maybe_unused mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
}

static void __maybe_unused mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (mx51_ecspi_rx_available(spi_imx))
		readl(spi_imx->base + MXC_CSPIRXDATA);
}

#define MX31_INTREG_TEEN	(1 << 0)
#define MX31_INTREG_RREN	(1 << 3)

#define MX31_CSPICTRL_ENABLE	(1 << 0)
#define MX31_CSPICTRL_MASTER	(1 << 1)
#define MX31_CSPICTRL_XCH	(1 << 2)
#define MX31_CSPICTRL_POL	(1 << 4)
#define MX31_CSPICTRL_PHA	(1 << 5)
#define MX31_CSPICTRL_SSCTL	(1 << 6)
#define MX31_CSPICTRL_SSPOL	(1 << 7)
#define MX31_CSPICTRL_BC_SHIFT	8
#define MX35_CSPICTRL_BL_SHIFT	20
#define MX31_CSPICTRL_CS_SHIFT	24
#define MX35_CSPICTRL_CS_SHIFT	12
#define MX31_CSPICTRL_DR_SHIFT	16

#define MX31_CSPISTATUS		0x14
#define MX31_STATUS_RR		(1 << 3)

/* These functions also work for the i.MX35, but be aware that
 * the i.MX35 has a slightly different register layout for bits
 * we do not use here.
 */
static void __maybe_unused mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX31_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX31_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX31_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int __maybe_unused mx31_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
	int cs = spi_imx->chipselect[config->cs];

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
		MX31_CSPICTRL_DR_SHIFT;

	if (is_imx35_cspi(spi_imx)) {
		reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
		reg |= MX31_CSPICTRL_SSCTL;
	} else {
		reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
	}

	if (config->mode & SPI_CPHA)
		reg |= MX31_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX31_CSPICTRL_POL;
	if (config->mode & SPI_CS_HIGH)
		reg |= MX31_CSPICTRL_SSPOL;
	if (cs < 0)
		reg |= (cs + 32) <<
			(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
						  MX31_CSPICTRL_CS_SHIFT);

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}
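
/*
 * A negative value in spi_imx->chipselect[] denotes a native chip select
 * rather than a GPIO: it encodes the hardware CS number minus 32, so
 * (cs + 32) above recovers the CS field while gpio_is_valid() remains
 * false for it (mx21_config() below uses the same convention).
 */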

static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
}

static void __maybe_unused mx31_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
		readl(spi_imx->base + MXC_CSPIRXDATA);
}

#define MX21_INTREG_RR		(1 << 4)
#define MX21_INTREG_TEEN	(1 << 9)
#define MX21_INTREG_RREN	(1 << 13)

#define MX21_CSPICTRL_POL	(1 << 5)
#define MX21_CSPICTRL_PHA	(1 << 6)
#define MX21_CSPICTRL_SSPOL	(1 << 8)
#define MX21_CSPICTRL_XCH	(1 << 9)
#define MX21_CSPICTRL_ENABLE	(1 << 10)
#define MX21_CSPICTRL_MASTER	(1 << 11)
#define MX21_CSPICTRL_DR_SHIFT	14
#define MX21_CSPICTRL_CS_SHIFT	19

static void __maybe_unused mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX21_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX21_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void __maybe_unused mx21_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX21_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int __maybe_unused mx21_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
	int cs = spi_imx->chipselect[config->cs];
	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;

	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) <<
		MX21_CSPICTRL_DR_SHIFT;
	reg |= config->bpw - 1;

	if (config->mode & SPI_CPHA)
		reg |= MX21_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX21_CSPICTRL_POL;
	if (config->mode & SPI_CS_HIGH)
		reg |= MX21_CSPICTRL_SSPOL;
	if (cs < 0)
		reg |= (cs + 32) << MX21_CSPICTRL_CS_SHIFT;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int __maybe_unused mx21_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
}

static void __maybe_unused mx21_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}

#define MX1_INTREG_RR		(1 << 3)
#define MX1_INTREG_TEEN		(1 << 8)
#define MX1_INTREG_RREN		(1 << 11)

#define MX1_CSPICTRL_POL	(1 << 4)
#define MX1_CSPICTRL_PHA	(1 << 5)
#define MX1_CSPICTRL_XCH	(1 << 8)
#define MX1_CSPICTRL_ENABLE	(1 << 9)
#define MX1_CSPICTRL_MASTER	(1 << 10)
#define MX1_CSPICTRL_DR_SHIFT	13

static void __maybe_unused mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX1_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX1_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void __maybe_unused mx1_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX1_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int __maybe_unused mx1_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
		MX1_CSPICTRL_DR_SHIFT;
	reg |= config->bpw - 1;

	if (config->mode & SPI_CPHA)
		reg |= MX1_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX1_CSPICTRL_POL;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int __maybe_unused mx1_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
}

static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}

static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
	.intctrl = mx1_intctrl,
	.config = mx1_config,
	.trigger = mx1_trigger,
	.rx_available = mx1_rx_available,
	.reset = mx1_reset,
	.devtype = IMX1_CSPI,
};

static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
	.intctrl = mx21_intctrl,
	.config = mx21_config,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.devtype = IMX21_CSPI,
};

static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
	/* i.mx27 cspi shares the functions with the i.mx21 one */
	.intctrl = mx21_intctrl,
	.config = mx21_config,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.devtype = IMX27_CSPI,
};

static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
	.intctrl = mx31_intctrl,
	.config = mx31_config,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.devtype = IMX31_CSPI,
};

static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
	/* i.mx35 and later cspi share the functions with the i.mx31 one */
	.intctrl = mx31_intctrl,
	.config = mx31_config,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.devtype = IMX35_CSPI,
};

static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.config = mx51_ecspi_config,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.devtype = IMX51_ECSPI,
};

static const struct platform_device_id spi_imx_devtype[] = {
	{
		.name = "imx1-cspi",
		.driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data,
	}, {
		.name = "imx21-cspi",
		.driver_data = (kernel_ulong_t) &imx21_cspi_devtype_data,
	}, {
		.name = "imx27-cspi",
		.driver_data = (kernel_ulong_t) &imx27_cspi_devtype_data,
	}, {
		.name = "imx31-cspi",
		.driver_data = (kernel_ulong_t) &imx31_cspi_devtype_data,
	}, {
		.name = "imx35-cspi",
		.driver_data = (kernel_ulong_t) &imx35_cspi_devtype_data,
	}, {
		.name = "imx51-ecspi",
		.driver_data = (kernel_ulong_t) &imx51_ecspi_devtype_data,
	}, {
		/* sentinel */
	}
};

static const struct of_device_id spi_imx_dt_ids[] = {
	{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
	{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
	{ .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
	{ .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
	{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
	{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);

static void spi_imx_chipselect(struct spi_device *spi, int is_active)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	int gpio = spi_imx->chipselect[spi->chip_select];
	int active = is_active != BITBANG_CS_INACTIVE;
	int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);

	if (!gpio_is_valid(gpio))
		return;

	gpio_set_value(gpio, dev_is_lowactive ^ active);
}
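
/*
 * dev_is_lowactive ^ active yields the required GPIO level: an active-low
 * device is driven to 0 while selected and 1 while idle, an active-high
 * (SPI_CS_HIGH) device the other way around.
 */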

static void spi_imx_push(struct spi_imx_data *spi_imx)
{
	while (spi_imx->txfifo < spi_imx_get_fifosize(spi_imx)) {
		if (!spi_imx->count)
			break;
		spi_imx->tx(spi_imx);
		spi_imx->txfifo++;
	}

	spi_imx->devtype_data->trigger(spi_imx);
}

static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
	struct spi_imx_data *spi_imx = dev_id;

	while (spi_imx->devtype_data->rx_available(spi_imx)) {
		spi_imx->rx(spi_imx);
		spi_imx->txfifo--;
	}

	if (spi_imx->count) {
		spi_imx_push(spi_imx);
		return IRQ_HANDLED;
	}

	if (spi_imx->txfifo) {
		/* No data left to push, but still waiting for rx data,
		 * enable receive data available interrupt.
		 */
		spi_imx->devtype_data->intctrl(
				spi_imx, MXC_INT_RR);
		return IRQ_HANDLED;
	}

	spi_imx->devtype_data->intctrl(spi_imx, 0);
	complete(&spi_imx->xfer_done);

	return IRQ_HANDLED;
}
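
/*
 * PIO flow: spi_imx_push() fills the TX FIFO and the TE interrupt refills
 * it as words drain; once nothing is left to push, only the RR interrupt
 * stays enabled until the remaining words have been read back, and the
 * completion signalled here wakes spi_imx_pio_transfer().
 */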

static int spi_imx_setupxfer(struct spi_device *spi,
				 struct spi_transfer *t)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	struct spi_imx_config config;

	config.bpw = t ? t->bits_per_word : spi->bits_per_word;
	config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
	config.mode = spi->mode;
	config.cs = spi->chip_select;

	if (!config.speed_hz)
		config.speed_hz = spi->max_speed_hz;
	if (!config.bpw)
		config.bpw = spi->bits_per_word;

	/* Initialize the functions for transfer */
	if (config.bpw <= 8) {
		spi_imx->rx = spi_imx_buf_rx_u8;
		spi_imx->tx = spi_imx_buf_tx_u8;
	} else if (config.bpw <= 16) {
		spi_imx->rx = spi_imx_buf_rx_u16;
		spi_imx->tx = spi_imx_buf_tx_u16;
	} else {
		spi_imx->rx = spi_imx_buf_rx_u32;
		spi_imx->tx = spi_imx_buf_tx_u32;
	}

	spi_imx->devtype_data->config(spi_imx, &config);

	return 0;
}

static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
{
	struct spi_master *master = spi_imx->bitbang.master;

	if (master->dma_rx) {
		dma_release_channel(master->dma_rx);
		master->dma_rx = NULL;
	}

	if (master->dma_tx) {
		dma_release_channel(master->dma_tx);
		master->dma_tx = NULL;
	}

	spi_imx->dma_is_inited = 0;
}

static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
			     struct spi_master *master,
			     const struct resource *res)
{
	struct dma_slave_config slave_config = {};
	int ret;

	/* use pio mode for i.mx6dl chip TKT238285 */
	if (of_machine_is_compatible("fsl,imx6dl"))
		return 0;

	/* Prepare for TX DMA: */
	master->dma_tx = dma_request_slave_channel(dev, "tx");
	if (!master->dma_tx) {
		dev_err(dev, "cannot get the TX DMA channel!\n");
		ret = -EINVAL;
		goto err;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	slave_config.dst_addr = res->start + MXC_CSPITXDATA;
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.dst_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
	ret = dmaengine_slave_config(master->dma_tx, &slave_config);
	if (ret) {
		dev_err(dev, "error in TX dma configuration.\n");
		goto err;
	}

	/* Prepare for RX: */
	master->dma_rx = dma_request_slave_channel(dev, "rx");
	if (!master->dma_rx) {
		dev_dbg(dev, "cannot get the DMA channel.\n");
		ret = -EINVAL;
		goto err;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	slave_config.src_addr = res->start + MXC_CSPIRXDATA;
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.src_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
	ret = dmaengine_slave_config(master->dma_rx, &slave_config);
	if (ret) {
		dev_err(dev, "error in RX dma configuration.\n");
		goto err;
	}

	init_completion(&spi_imx->dma_rx_completion);
	init_completion(&spi_imx->dma_tx_completion);
	master->can_dma = spi_imx_can_dma;
	master->max_dma_len = MAX_SDMA_BD_BYTES;
	spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
					 SPI_MASTER_MUST_TX;
	spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
	spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
	spi_imx->dma_is_inited = 1;

	return 0;
err:
	spi_imx_sdma_exit(spi_imx);
	return ret;
}
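
/*
 * Burst sizes and watermarks are all set to half the FIFO depth here;
 * the intent appears to be that a DMA request fires once the TX FIFO is
 * half empty or the RX FIFO half full.
 */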

static void spi_imx_dma_rx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

	complete(&spi_imx->dma_rx_completion);
}

static void spi_imx_dma_tx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

	complete(&spi_imx->dma_tx_completion);
}

static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
				struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
	int ret;
	unsigned long timeout;
	u32 dma;
	int left;
	struct spi_master *master = spi_imx->bitbang.master;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;

	if (tx) {
		desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_tx)
			goto no_dma;

		desc_tx->callback = spi_imx_dma_tx_callback;
		desc_tx->callback_param = (void *)spi_imx;
		dmaengine_submit(desc_tx);
	}

	if (rx) {
		desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_rx)
			goto no_dma;

		desc_rx->callback = spi_imx_dma_rx_callback;
		desc_rx->callback_param = (void *)spi_imx;
		dmaengine_submit(desc_rx);
	}

	reinit_completion(&spi_imx->dma_rx_completion);
	reinit_completion(&spi_imx->dma_tx_completion);

	/* Trigger the cspi module. */
	spi_imx->dma_finished = 0;

	dma = readl(spi_imx->base + MX51_ECSPI_DMA);
	dma = dma & (~MX51_ECSPI_DMA_RXT_WML_MASK);
	/* Change RX_DMA_LENGTH so the DMA fetches the tail data */
	left = transfer->len % spi_imx->rxt_wml;
	if (left)
		writel(dma | (left << MX51_ECSPI_DMA_RXT_WML_OFFSET),
				spi_imx->base + MX51_ECSPI_DMA);
	spi_imx->devtype_data->trigger(spi_imx);

	dma_async_issue_pending(master->dma_tx);
	dma_async_issue_pending(master->dma_rx);
	/* Wait for SDMA to finish the data transfer. */
	timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
						IMX_DMA_TIMEOUT);
	if (!timeout) {
		pr_warn("%s %s: I/O Error in DMA TX\n",
			dev_driver_string(&master->dev),
			dev_name(&master->dev));
		dmaengine_terminate_all(master->dma_tx);
	} else {
		timeout = wait_for_completion_timeout(
				&spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
		if (!timeout) {
			pr_warn("%s %s: I/O Error in DMA RX\n",
				dev_driver_string(&master->dev),
				dev_name(&master->dev));
			spi_imx->devtype_data->reset(spi_imx);
			dmaengine_terminate_all(master->dma_rx);
		}
		writel(dma |
			spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET,
			spi_imx->base + MX51_ECSPI_DMA);
	}

	spi_imx->dma_finished = 1;
	spi_imx->devtype_data->trigger(spi_imx);

	if (!timeout)
		ret = -ETIMEDOUT;
	else
		ret = transfer->len;

	return ret;

no_dma:
	pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
		     dev_driver_string(&master->dev),
		     dev_name(&master->dev));
	return -EAGAIN;
}
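
/*
 * A return value of -EAGAIN (the no_dma path above) is not treated as an
 * error: spi_imx_transfer() below retries the same transfer in PIO mode.
 */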

static int spi_imx_pio_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

	spi_imx->tx_buf = transfer->tx_buf;
	spi_imx->rx_buf = transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;

	reinit_completion(&spi_imx->xfer_done);

	spi_imx_push(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);

	wait_for_completion(&spi_imx->xfer_done);

	return transfer->len;
}

static int spi_imx_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	int ret;
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

	if (spi_imx->bitbang.master->can_dma &&
	    spi_imx_can_dma(spi_imx->bitbang.master, spi, transfer)) {
		spi_imx->usedma = true;
		ret = spi_imx_dma_transfer(spi_imx, transfer);
		if (ret != -EAGAIN)
			return ret;
	}
	spi_imx->usedma = false;

	return spi_imx_pio_transfer(spi, transfer);
}

static int spi_imx_setup(struct spi_device *spi)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	int gpio = spi_imx->chipselect[spi->chip_select];

	dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
		 spi->mode, spi->bits_per_word, spi->max_speed_hz);

	if (gpio_is_valid(gpio))
		gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);

	spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);

	return 0;
}

static void spi_imx_cleanup(struct spi_device *spi)
{
}

static int
spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
	int ret;

	ret = clk_enable(spi_imx->clk_per);
	if (ret)
		return ret;

	ret = clk_enable(spi_imx->clk_ipg);
	if (ret) {
		clk_disable(spi_imx->clk_per);
		return ret;
	}

	return 0;
}

static int
spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	clk_disable(spi_imx->clk_ipg);
	clk_disable(spi_imx->clk_per);
	return 0;
}
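
/*
 * The clocks are prepared once in spi_imx_probe() and only enabled and
 * disabled around each message here; clk_prepare()/clk_unprepare() may
 * sleep, while clk_enable()/clk_disable() must not, which keeps the
 * per-message path lightweight.
 */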

static int spi_imx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(spi_imx_dt_ids, &pdev->dev);
	struct spi_imx_master *mxc_platform_info =
			dev_get_platdata(&pdev->dev);
	struct spi_master *master;
	struct spi_imx_data *spi_imx;
	struct resource *res;
	int i, ret, num_cs, irq;

	if (!np && !mxc_platform_info) {
		dev_err(&pdev->dev, "can't get the platform data\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(np, "fsl,spi-num-chipselects", &num_cs);
	if (ret < 0) {
		if (mxc_platform_info)
			num_cs = mxc_platform_info->num_chipselect;
		else
			return ret;
	}

	master = spi_alloc_master(&pdev->dev,
			sizeof(struct spi_imx_data) + sizeof(int) * num_cs);
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	master->bus_num = pdev->id;
	master->num_chipselect = num_cs;

	spi_imx = spi_master_get_devdata(master);
	spi_imx->bitbang.master = master;

	for (i = 0; i < master->num_chipselect; i++) {
		int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
		if (!gpio_is_valid(cs_gpio) && mxc_platform_info)
			cs_gpio = mxc_platform_info->chipselect[i];

		spi_imx->chipselect[i] = cs_gpio;
		if (!gpio_is_valid(cs_gpio))
			continue;

		ret = devm_gpio_request(&pdev->dev, spi_imx->chipselect[i],
					DRIVER_NAME);
		if (ret) {
			dev_err(&pdev->dev, "can't get cs gpios\n");
			goto out_master_put;
		}
	}

	spi_imx->bitbang.chipselect = spi_imx_chipselect;
	spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
	spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
	spi_imx->bitbang.master->setup = spi_imx_setup;
	spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
	spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
	spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
	spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	init_completion(&spi_imx->xfer_done);

	spi_imx->devtype_data = of_id ? of_id->data :
		(struct spi_imx_devtype_data *)pdev->id_entry->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(spi_imx->base)) {
		ret = PTR_ERR(spi_imx->base);
		goto out_master_put;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto out_master_put;
	}

	ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
			       dev_name(&pdev->dev), spi_imx);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
		goto out_master_put;
	}

	spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(spi_imx->clk_ipg)) {
		ret = PTR_ERR(spi_imx->clk_ipg);
		goto out_master_put;
	}

	spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(spi_imx->clk_per)) {
		ret = PTR_ERR(spi_imx->clk_per);
		goto out_master_put;
	}

	ret = clk_prepare_enable(spi_imx->clk_per);
	if (ret)
		goto out_master_put;

	ret = clk_prepare_enable(spi_imx->clk_ipg);
	if (ret)
		goto out_put_per;

	spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
	/*
	 * Only validated on i.mx6 now, can remove the constraint if validated
	 * on other chips.
	 */
	if (spi_imx->devtype_data == &imx51_ecspi_devtype_data
	    && spi_imx_sdma_init(&pdev->dev, spi_imx, master, res))
		dev_err(&pdev->dev, "dma setup error, use pio instead\n");

	spi_imx->devtype_data->reset(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, 0);

	master->dev.of_node = pdev->dev.of_node;
	ret = spi_bitbang_start(&spi_imx->bitbang);
	if (ret) {
		dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
		goto out_clk_put;
	}

	dev_info(&pdev->dev, "probed\n");

	clk_disable(spi_imx->clk_ipg);
	clk_disable(spi_imx->clk_per);
	return ret;

out_clk_put:
	clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
	clk_disable_unprepare(spi_imx->clk_per);
out_master_put:
	spi_master_put(master);

	return ret;
}

static int spi_imx_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	spi_bitbang_stop(&spi_imx->bitbang);

	writel(0, spi_imx->base + MXC_CSPICTRL);
	clk_unprepare(spi_imx->clk_ipg);
	clk_unprepare(spi_imx->clk_per);
	spi_imx_sdma_exit(spi_imx);
	spi_master_put(master);

	return 0;
}

static struct platform_driver spi_imx_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = spi_imx_dt_ids,
	},
	.id_table = spi_imx_devtype,
	.probe = spi_imx_probe,
	.remove = spi_imx_remove,
};
module_platform_driver(spi_imx_driver);

MODULE_DESCRIPTION("SPI Master Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);