/*
 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright (C) 2008 Juergen Beisert
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation
 * 51 Franklin Street, Fifth Floor
 * Boston, MA 02110-1301, USA.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>

#include <linux/platform_data/dma-imx.h>
#include <linux/platform_data/spi-imx.h>

#define DRIVER_NAME "spi_imx"

#define MXC_CSPIRXDATA		0x00
#define MXC_CSPITXDATA		0x04
#define MXC_CSPICTRL		0x08
#define MXC_CSPIINT		0x0c
#define MXC_RESET		0x1c

/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR	(1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE	(1 << 1) /* Transmit FIFO empty interrupt */

/* The maximum bytes that a sdma BD can transfer. */
#define MAX_SDMA_BD_BYTES	(1 << 15)
#define IMX_DMA_TIMEOUT		(msecs_to_jiffies(3000))

struct spi_imx_config {
	unsigned int speed_hz;
	unsigned int bpw;
	unsigned int mode;
	u8 cs;
};

enum spi_imx_devtype {
	IMX1_CSPI,
	IMX21_CSPI,
	IMX27_CSPI,
	IMX31_CSPI,
	IMX35_CSPI,	/* CSPI on all i.mx except above */
	IMX51_ECSPI,	/* ECSPI on i.mx51 and later */
};

struct spi_imx_data;

struct spi_imx_devtype_data {
	void (*intctrl)(struct spi_imx_data *, int);
	int (*config)(struct spi_imx_data *, struct spi_imx_config *);
	void (*trigger)(struct spi_imx_data *);
	int (*rx_available)(struct spi_imx_data *);
	void (*reset)(struct spi_imx_data *);
	enum spi_imx_devtype devtype;
};

struct spi_imx_data {
	struct spi_bitbang bitbang;

	struct completion xfer_done;
	void __iomem *base;
	struct clk *clk_per;
	struct clk *clk_ipg;
	unsigned long spi_clk;

	unsigned int count;
	void (*tx)(struct spi_imx_data *);
	void (*rx)(struct spi_imx_data *);
	void *rx_buf;
	const void *tx_buf;
	unsigned int txfifo; /* number of words pushed in tx FIFO */

	/* DMA */
	unsigned int dma_is_inited;
	unsigned int dma_finished;
	bool usedma;
	u32 rx_wml;
	u32 tx_wml;
	u32 rxt_wml;
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct spi_imx_devtype_data *devtype_data;
	int chipselect[0];
};

static inline int is_imx27_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX27_CSPI;
}

static inline int is_imx35_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX35_CSPI;
}

static inline unsigned spi_imx_get_fifosize(struct spi_imx_data *d)
{
	return (d->devtype_data->devtype == IMX51_ECSPI) ? 64 : 8;
}
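/*
 * The two macros below generate the PIO FIFO accessors
 * spi_imx_buf_rx_u8/u16/u32() and spi_imx_buf_tx_u8/u16/u32();
 * spi_imx_setupxfer() later picks the pair matching the transfer's
 * bits-per-word, so each FIFO word carries one 8-, 16- or 32-bit value.
 */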
#define MXC_SPI_BUF_RX(type)						\
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)		\
{									\
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);	\
									\
	if (spi_imx->rx_buf) {						\
		*(type *)spi_imx->rx_buf = val;				\
		spi_imx->rx_buf += sizeof(type);			\
	}								\
}

#define MXC_SPI_BUF_TX(type)						\
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)		\
{									\
	type val = 0;							\
									\
	if (spi_imx->tx_buf) {						\
		val = *(type *)spi_imx->tx_buf;				\
		spi_imx->tx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->count -= sizeof(type);					\
									\
	writel(val, spi_imx->base + MXC_CSPITXDATA);			\
}

MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)

/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
 * (which is currently not the case in this driver)
 */
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024};

/* MX21, MX27 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
		unsigned int fspi, unsigned int max)
{
	int i;

	for (i = 2; i < max; i++)
		if (fspi * mxc_clkdivs[i] >= fin)
			return i;

	return max;
}

/* MX1, MX31, MX35, MX51 CSPI */
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
		unsigned int fspi)
{
	int i, div = 4;

	for (i = 0; i < 7; i++) {
		if (fspi * div >= fin)
			return i;
		div <<= 1;
	}

	return 7;
}
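/*
 * Worked example (illustrative numbers, not from a datasheet): with a
 * 60 MHz reference and a requested 10 MHz SCLK, spi_imx_clkdiv_2()
 * returns 1, i.e. a divider of 4 << 1 = 8, giving 60 MHz / 8 = 7.5 MHz,
 * the fastest rate not above the request. Likewise spi_imx_clkdiv_1()
 * walks mxc_clkdivs[] and, for 48 MHz reference / 1 MHz request,
 * returns index 9 (divide by 48), hitting the requested rate exactly.
 */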
static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml)
	    && (transfer->len > spi_imx->tx_wml))
		return true;
	return false;
}

#define MX51_ECSPI_CTRL		0x08
#define MX51_ECSPI_CTRL_ENABLE		(1 << 0)
#define MX51_ECSPI_CTRL_XCH		(1 << 2)
#define MX51_ECSPI_CTRL_SMC		(1 << 3)
#define MX51_ECSPI_CTRL_MODE_MASK	(0xf << 4)
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET	8
#define MX51_ECSPI_CTRL_PREDIV_OFFSET	12
#define MX51_ECSPI_CTRL_CS(cs)		((cs) << 18)
#define MX51_ECSPI_CTRL_BL_OFFSET	20

#define MX51_ECSPI_CONFIG	0x0c
#define MX51_ECSPI_CONFIG_SCLKPHA(cs)	(1 << ((cs) + 0))
#define MX51_ECSPI_CONFIG_SCLKPOL(cs)	(1 << ((cs) + 4))
#define MX51_ECSPI_CONFIG_SBBCTRL(cs)	(1 << ((cs) + 8))
#define MX51_ECSPI_CONFIG_SSBPOL(cs)	(1 << ((cs) + 12))
#define MX51_ECSPI_CONFIG_SCLKCTL(cs)	(1 << ((cs) + 20))

#define MX51_ECSPI_INT		0x10
#define MX51_ECSPI_INT_TEEN		(1 << 0)
#define MX51_ECSPI_INT_RREN		(1 << 3)

#define MX51_ECSPI_DMA		0x14
#define MX51_ECSPI_DMA_TX_WML_OFFSET	0
#define MX51_ECSPI_DMA_TX_WML_MASK	0x3F
#define MX51_ECSPI_DMA_RX_WML_OFFSET	16
#define MX51_ECSPI_DMA_RX_WML_MASK	(0x3F << 16)
#define MX51_ECSPI_DMA_RXT_WML_OFFSET	24
#define MX51_ECSPI_DMA_RXT_WML_MASK	(0x3F << 24)

#define MX51_ECSPI_DMA_TEDEN_OFFSET	7
#define MX51_ECSPI_DMA_RXDEN_OFFSET	23
#define MX51_ECSPI_DMA_RXTDEN_OFFSET	31

#define MX51_ECSPI_STAT		0x18
#define MX51_ECSPI_STAT_RR		(1 << 3)

/* MX51 eCSPI */
static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi,
				      unsigned int *fres)
{
	/*
	 * there are two 4-bit dividers, the pre-divider divides by
	 * $pre, the post-divider by 2^$post
	 */
	unsigned int pre, post;

	if (unlikely(fspi > fin))
		return 0;

	post = fls(fin) - fls(fspi);
	if (fin > fspi << post)
		post++;

	/* now we have: (fin <= fspi << post) with post being minimal */

	post = max(4U, post) - 4;
	if (unlikely(post > 0xf)) {
		pr_err("%s: cannot set clock freq: %u (base freq: %u)\n",
				__func__, fspi, fin);
		return 0xff;
	}

	pre = DIV_ROUND_UP(fin, fspi << post) - 1;

	pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
			__func__, fin, fspi, post, pre);

	/* Resulting frequency for the SCLK line. */
	*fres = (fin / (pre + 1)) >> post;

	return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
		(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
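/*
 * Worked example for the divider above (illustrative numbers): for
 * fin = 60 MHz and fspi = 1 MHz the calculation settles on post = 2 and
 * pre = 14, i.e. SCLK = fin / ((pre + 1) << post) = 60 MHz / (15 * 4)
 * = 1 MHz, and *fres reports that resulting rate back so that
 * mx51_ecspi_config() can derive its settle delay from it.
 */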
static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned val = 0;

	if (enable & MXC_INT_TE)
		val |= MX51_ECSPI_INT_TEEN;

	if (enable & MXC_INT_RR)
		val |= MX51_ECSPI_INT_RREN;

	writel(val, spi_imx->base + MX51_ECSPI_INT);
}

static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
	u32 reg = readl(spi_imx->base + MX51_ECSPI_CTRL);

	if (!spi_imx->usedma)
		reg |= MX51_ECSPI_CTRL_XCH;
	else if (!spi_imx->dma_finished)
		reg |= MX51_ECSPI_CTRL_SMC;
	else
		reg &= ~MX51_ECSPI_CTRL_SMC;
	writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}

static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0, dma = 0;
	u32 tx_wml_cfg, rx_wml_cfg, rxt_wml_cfg;
	u32 clk = config->speed_hz, delay;

	/*
	 * The hardware seems to have a race condition when changing modes. The
	 * current assumption is that the selection of the channel arrives
	 * earlier in the hardware than the mode bits when they are written at
	 * the same time.
	 * So set master mode for all channels as we do not support slave mode.
	 */
	ctrl |= MX51_ECSPI_CTRL_MODE_MASK;

	/* set clock speed */
	ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz, &clk);

	/* set chip select to use */
	ctrl |= MX51_ECSPI_CTRL_CS(config->cs);

	ctrl |= (config->bpw - 1) << MX51_ECSPI_CTRL_BL_OFFSET;

	cfg |= MX51_ECSPI_CONFIG_SBBCTRL(config->cs);

	if (config->mode & SPI_CPHA)
		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);

	if (config->mode & SPI_CPOL) {
		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
	}
	if (config->mode & SPI_CS_HIGH)
		cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);

	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);

	/*
	 * Wait until the changes in the configuration register CONFIGREG
	 * propagate into the hardware. It takes exactly one tick of the
	 * SCLK clock, but we will wait two SCLK clocks just to be sure. The
	 * effect of the delay it takes for the hardware to apply changes
	 * is noticeable if the SCLK clock runs very slowly. In such a case, if
	 * the polarity of SCLK should be inverted, the GPIO chip select might
	 * be asserted before the SCLK polarity changes, which would disrupt
	 * the SPI communication as the device on the other end would consider
	 * the change of SCLK polarity as a clock tick already.
	 */
	delay = (2 * 1000000) / clk;
	if (likely(delay < 10))	/* SCLK is faster than 100 kHz */
		udelay(delay);
	else			/* SCLK is _very_ slow */
		usleep_range(delay, delay + 10);

	/*
	 * Configure the DMA register: setup the watermark
	 * and enable DMA request.
	 */
	if (spi_imx->dma_is_inited) {
		dma = readl(spi_imx->base + MX51_ECSPI_DMA);

		spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2;
		rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
		tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
		rxt_wml_cfg = spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET;
		dma = (dma & ~MX51_ECSPI_DMA_TX_WML_MASK
			   & ~MX51_ECSPI_DMA_RX_WML_MASK
			   & ~MX51_ECSPI_DMA_RXT_WML_MASK)
			   | rx_wml_cfg | tx_wml_cfg | rxt_wml_cfg
			   | (1 << MX51_ECSPI_DMA_TEDEN_OFFSET)
			   | (1 << MX51_ECSPI_DMA_RXDEN_OFFSET)
			   | (1 << MX51_ECSPI_DMA_RXTDEN_OFFSET);

		writel(dma, spi_imx->base + MX51_ECSPI_DMA);
	}

	return 0;
}

static int __maybe_unused mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
}

static void __maybe_unused mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (mx51_ecspi_rx_available(spi_imx))
		readl(spi_imx->base + MXC_CSPIRXDATA);
}

#define MX31_INTREG_TEEN	(1 << 0)
#define MX31_INTREG_RREN	(1 << 3)

#define MX31_CSPICTRL_ENABLE	(1 << 0)
#define MX31_CSPICTRL_MASTER	(1 << 1)
#define MX31_CSPICTRL_XCH	(1 << 2)
#define MX31_CSPICTRL_POL	(1 << 4)
#define MX31_CSPICTRL_PHA	(1 << 5)
#define MX31_CSPICTRL_SSCTL	(1 << 6)
#define MX31_CSPICTRL_SSPOL	(1 << 7)
#define MX31_CSPICTRL_BC_SHIFT	8
#define MX35_CSPICTRL_BL_SHIFT	20
#define MX31_CSPICTRL_CS_SHIFT	24
#define MX35_CSPICTRL_CS_SHIFT	12
#define MX31_CSPICTRL_DR_SHIFT	16

#define MX31_CSPISTATUS		0x14
#define MX31_STATUS_RR		(1 << 3)

/* These functions also work for the i.MX35, but be aware that
 * the i.MX35 has a slightly different register layout for bits
 * we do not use here.
 */
static void __maybe_unused mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX31_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX31_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX31_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int __maybe_unused mx31_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
	int cs = spi_imx->chipselect[config->cs];

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
		MX31_CSPICTRL_DR_SHIFT;

	if (is_imx35_cspi(spi_imx)) {
		reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
		reg |= MX31_CSPICTRL_SSCTL;
	} else {
		reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
	}

	if (config->mode & SPI_CPHA)
		reg |= MX31_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX31_CSPICTRL_POL;
	if (config->mode & SPI_CS_HIGH)
		reg |= MX31_CSPICTRL_SSPOL;
	if (cs < 0)
		reg |= (cs + 32) <<
			(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
						  MX31_CSPICTRL_CS_SHIFT);

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
}

static void __maybe_unused mx31_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
		readl(spi_imx->base + MXC_CSPIRXDATA);
}
#define MX21_INTREG_RR		(1 << 4)
#define MX21_INTREG_TEEN	(1 << 9)
#define MX21_INTREG_RREN	(1 << 13)

#define MX21_CSPICTRL_POL	(1 << 5)
#define MX21_CSPICTRL_PHA	(1 << 6)
#define MX21_CSPICTRL_SSPOL	(1 << 8)
#define MX21_CSPICTRL_XCH	(1 << 9)
#define MX21_CSPICTRL_ENABLE	(1 << 10)
#define MX21_CSPICTRL_MASTER	(1 << 11)
#define MX21_CSPICTRL_DR_SHIFT	14
#define MX21_CSPICTRL_CS_SHIFT	19

static void __maybe_unused mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX21_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX21_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void __maybe_unused mx21_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX21_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int __maybe_unused mx21_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
	int cs = spi_imx->chipselect[config->cs];
	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;

	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) <<
		MX21_CSPICTRL_DR_SHIFT;
	reg |= config->bpw - 1;

	if (config->mode & SPI_CPHA)
		reg |= MX21_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX21_CSPICTRL_POL;
	if (config->mode & SPI_CS_HIGH)
		reg |= MX21_CSPICTRL_SSPOL;
	if (cs < 0)
		reg |= (cs + 32) << MX21_CSPICTRL_CS_SHIFT;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int __maybe_unused mx21_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
}

static void __maybe_unused mx21_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}

#define MX1_INTREG_RR		(1 << 3)
#define MX1_INTREG_TEEN		(1 << 8)
#define MX1_INTREG_RREN		(1 << 11)

#define MX1_CSPICTRL_POL	(1 << 4)
#define MX1_CSPICTRL_PHA	(1 << 5)
#define MX1_CSPICTRL_XCH	(1 << 8)
#define MX1_CSPICTRL_ENABLE	(1 << 9)
#define MX1_CSPICTRL_MASTER	(1 << 10)
#define MX1_CSPICTRL_DR_SHIFT	13

static void __maybe_unused mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX1_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX1_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void __maybe_unused mx1_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX1_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int __maybe_unused mx1_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
		MX1_CSPICTRL_DR_SHIFT;
	reg |= config->bpw - 1;

	if (config->mode & SPI_CPHA)
		reg |= MX1_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX1_CSPICTRL_POL;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int __maybe_unused mx1_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
}

static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}
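/*
 * The tables below tie each supported SoC to the register-access helpers
 * above; spi_imx_probe() picks one entry either from the OF match data or
 * from the platform device id and calls everything else through these
 * devtype_data hooks.
 */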
static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
	.intctrl = mx1_intctrl,
	.config = mx1_config,
	.trigger = mx1_trigger,
	.rx_available = mx1_rx_available,
	.reset = mx1_reset,
	.devtype = IMX1_CSPI,
};

static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
	.intctrl = mx21_intctrl,
	.config = mx21_config,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.devtype = IMX21_CSPI,
};

static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
	/* i.mx27 cspi shares the functions with the i.mx21 one */
	.intctrl = mx21_intctrl,
	.config = mx21_config,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.devtype = IMX27_CSPI,
};

static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
	.intctrl = mx31_intctrl,
	.config = mx31_config,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.devtype = IMX31_CSPI,
};

static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
	/* i.mx35 and later cspi shares the functions with the i.mx31 one */
	.intctrl = mx31_intctrl,
	.config = mx31_config,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.devtype = IMX35_CSPI,
};

static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.config = mx51_ecspi_config,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.devtype = IMX51_ECSPI,
};

static struct platform_device_id spi_imx_devtype[] = {
	{
		.name = "imx1-cspi",
		.driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data,
	}, {
		.name = "imx21-cspi",
		.driver_data = (kernel_ulong_t) &imx21_cspi_devtype_data,
	}, {
		.name = "imx27-cspi",
		.driver_data = (kernel_ulong_t) &imx27_cspi_devtype_data,
	}, {
		.name = "imx31-cspi",
		.driver_data = (kernel_ulong_t) &imx31_cspi_devtype_data,
	}, {
		.name = "imx35-cspi",
		.driver_data = (kernel_ulong_t) &imx35_cspi_devtype_data,
	}, {
		.name = "imx51-ecspi",
		.driver_data = (kernel_ulong_t) &imx51_ecspi_devtype_data,
	}, {
		/* sentinel */
	}
};

static const struct of_device_id spi_imx_dt_ids[] = {
	{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
	{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
	{ .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
	{ .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
	{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
	{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);

static void spi_imx_chipselect(struct spi_device *spi, int is_active)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	int gpio = spi_imx->chipselect[spi->chip_select];
	int active = is_active != BITBANG_CS_INACTIVE;
	int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);

	if (!gpio_is_valid(gpio))
		return;

	gpio_set_value(gpio, dev_is_lowactive ^ active);
}
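/*
 * PIO path: spi_imx_pio_transfer() loads tx_buf/rx_buf/count and calls
 * spi_imx_push(), which fills the TX FIFO and triggers the exchange.
 * The TE interrupt refills the FIFO while spi_imx_isr() drains received
 * words; once nothing is left to push the driver switches to the RR
 * interrupt to collect the remaining words and then completes xfer_done.
 */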
static void spi_imx_push(struct spi_imx_data *spi_imx)
{
	while (spi_imx->txfifo < spi_imx_get_fifosize(spi_imx)) {
		if (!spi_imx->count)
			break;
		spi_imx->tx(spi_imx);
		spi_imx->txfifo++;
	}

	spi_imx->devtype_data->trigger(spi_imx);
}

static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
	struct spi_imx_data *spi_imx = dev_id;

	while (spi_imx->devtype_data->rx_available(spi_imx)) {
		spi_imx->rx(spi_imx);
		spi_imx->txfifo--;
	}

	if (spi_imx->count) {
		spi_imx_push(spi_imx);
		return IRQ_HANDLED;
	}

	if (spi_imx->txfifo) {
		/* No data left to push, but still waiting for rx data,
		 * enable receive data available interrupt.
		 */
		spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_RR);
		return IRQ_HANDLED;
	}

	spi_imx->devtype_data->intctrl(spi_imx, 0);
	complete(&spi_imx->xfer_done);

	return IRQ_HANDLED;
}

static int spi_imx_setupxfer(struct spi_device *spi,
			     struct spi_transfer *t)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	struct spi_imx_config config;

	config.bpw = t ? t->bits_per_word : spi->bits_per_word;
	config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
	config.mode = spi->mode;
	config.cs = spi->chip_select;

	if (!config.speed_hz)
		config.speed_hz = spi->max_speed_hz;
	if (!config.bpw)
		config.bpw = spi->bits_per_word;

	/* Initialize the functions for transfer */
	if (config.bpw <= 8) {
		spi_imx->rx = spi_imx_buf_rx_u8;
		spi_imx->tx = spi_imx_buf_tx_u8;
	} else if (config.bpw <= 16) {
		spi_imx->rx = spi_imx_buf_rx_u16;
		spi_imx->tx = spi_imx_buf_tx_u16;
	} else {
		spi_imx->rx = spi_imx_buf_rx_u32;
		spi_imx->tx = spi_imx_buf_tx_u32;
	}

	spi_imx->devtype_data->config(spi_imx, &config);

	return 0;
}
static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
{
	struct spi_master *master = spi_imx->bitbang.master;

	if (master->dma_rx) {
		dma_release_channel(master->dma_rx);
		master->dma_rx = NULL;
	}

	if (master->dma_tx) {
		dma_release_channel(master->dma_tx);
		master->dma_tx = NULL;
	}

	spi_imx->dma_is_inited = 0;
}

static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
			     struct spi_master *master,
			     const struct resource *res)
{
	struct dma_slave_config slave_config = {};
	int ret;

	/* use pio mode for i.mx6dl chip TKT238285 */
	if (of_machine_is_compatible("fsl,imx6dl"))
		return 0;

	/* Prepare for TX DMA: */
	master->dma_tx = dma_request_slave_channel(dev, "tx");
	if (!master->dma_tx) {
		dev_err(dev, "cannot get the TX DMA channel!\n");
		ret = -EINVAL;
		goto err;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	slave_config.dst_addr = res->start + MXC_CSPITXDATA;
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.dst_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
	ret = dmaengine_slave_config(master->dma_tx, &slave_config);
	if (ret) {
		dev_err(dev, "error in TX dma configuration.\n");
		goto err;
	}

	/* Prepare for RX DMA: */
	master->dma_rx = dma_request_slave_channel(dev, "rx");
	if (!master->dma_rx) {
		dev_dbg(dev, "cannot get the DMA channel.\n");
		ret = -EINVAL;
		goto err;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	slave_config.src_addr = res->start + MXC_CSPIRXDATA;
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.src_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
	ret = dmaengine_slave_config(master->dma_rx, &slave_config);
	if (ret) {
		dev_err(dev, "error in RX dma configuration.\n");
		goto err;
	}

	init_completion(&spi_imx->dma_rx_completion);
	init_completion(&spi_imx->dma_tx_completion);
	master->can_dma = spi_imx_can_dma;
	master->max_dma_len = MAX_SDMA_BD_BYTES;
	spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
					 SPI_MASTER_MUST_TX;
	spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
	spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
	spi_imx->dma_is_inited = 1;

	return 0;
err:
	spi_imx_sdma_exit(spi_imx);
	return ret;
}

static void spi_imx_dma_rx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

	complete(&spi_imx->dma_rx_completion);
}

static void spi_imx_dma_tx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

	complete(&spi_imx->dma_tx_completion);
}
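/*
 * DMA path: spi_imx_can_dma() only accepts transfers longer than the
 * RX/TX watermarks (half the FIFO, as programmed above), and
 * spi_imx_dma_transfer() then feeds both directions from the SPI core's
 * scatterlists, waits on the completions with a 3 second timeout
 * (IMX_DMA_TIMEOUT) and returns -EAGAIN if a descriptor cannot be
 * prepared, so the caller falls back to PIO.
 */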
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
				struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
	int ret;
	unsigned long timeout;
	u32 dma;
	int left;
	struct spi_master *master = spi_imx->bitbang.master;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;

	if (tx) {
		desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_tx)
			goto no_dma;

		desc_tx->callback = spi_imx_dma_tx_callback;
		desc_tx->callback_param = (void *)spi_imx;
		dmaengine_submit(desc_tx);
	}

	if (rx) {
		desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_rx)
			goto no_dma;

		desc_rx->callback = spi_imx_dma_rx_callback;
		desc_rx->callback_param = (void *)spi_imx;
		dmaengine_submit(desc_rx);
	}

	reinit_completion(&spi_imx->dma_rx_completion);
	reinit_completion(&spi_imx->dma_tx_completion);

	/* Trigger the cspi module. */
	spi_imx->dma_finished = 0;

	dma = readl(spi_imx->base + MX51_ECSPI_DMA);
	dma = dma & (~MX51_ECSPI_DMA_RXT_WML_MASK);
	/* Lower the RXT watermark so DMA also fetches the tail data */
	left = transfer->len % spi_imx->rxt_wml;
	if (left)
		writel(dma | (left << MX51_ECSPI_DMA_RXT_WML_OFFSET),
			spi_imx->base + MX51_ECSPI_DMA);
	spi_imx->devtype_data->trigger(spi_imx);

	dma_async_issue_pending(master->dma_tx);
	dma_async_issue_pending(master->dma_rx);
	/* Wait for SDMA to finish the data transfer. */
	timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
						IMX_DMA_TIMEOUT);
	if (!timeout) {
		pr_warn("%s %s: I/O Error in DMA TX\n",
			dev_driver_string(&master->dev),
			dev_name(&master->dev));
		dmaengine_terminate_all(master->dma_tx);
	} else {
		timeout = wait_for_completion_timeout(
				&spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
		if (!timeout) {
			pr_warn("%s %s: I/O Error in DMA RX\n",
				dev_driver_string(&master->dev),
				dev_name(&master->dev));
			spi_imx->devtype_data->reset(spi_imx);
			dmaengine_terminate_all(master->dma_rx);
		}
		writel(dma |
			spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET,
			spi_imx->base + MX51_ECSPI_DMA);
	}

	spi_imx->dma_finished = 1;
	spi_imx->devtype_data->trigger(spi_imx);

	if (!timeout)
		ret = -ETIMEDOUT;
	else
		ret = transfer->len;

	return ret;

no_dma:
	pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
		     dev_driver_string(&master->dev),
		     dev_name(&master->dev));
	return -EAGAIN;
}

static int spi_imx_pio_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

	spi_imx->tx_buf = transfer->tx_buf;
	spi_imx->rx_buf = transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;

	reinit_completion(&spi_imx->xfer_done);

	spi_imx_push(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);

	wait_for_completion(&spi_imx->xfer_done);

	return transfer->len;
}
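/*
 * Per-transfer dispatch: use DMA when the channels are set up and the
 * transfer is large enough, otherwise (or when DMA preparation fails
 * with -EAGAIN) fall back to PIO. The usedma flag also tells
 * mx51_ecspi_trigger() whether to start the exchange via SMC (DMA)
 * or XCH (PIO).
 */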
static int spi_imx_transfer(struct spi_device *spi,
			    struct spi_transfer *transfer)
{
	int ret;
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

	if (spi_imx->bitbang.master->can_dma &&
	    spi_imx_can_dma(spi_imx->bitbang.master, spi, transfer)) {
		spi_imx->usedma = true;
		ret = spi_imx_dma_transfer(spi_imx, transfer);
		if (ret != -EAGAIN)
			return ret;
	}
	spi_imx->usedma = false;

	return spi_imx_pio_transfer(spi, transfer);
}

static int spi_imx_setup(struct spi_device *spi)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	int gpio = spi_imx->chipselect[spi->chip_select];

	dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
		spi->mode, spi->bits_per_word, spi->max_speed_hz);

	if (gpio_is_valid(gpio))
		gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);

	spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);

	return 0;
}

static void spi_imx_cleanup(struct spi_device *spi)
{
}

static int
spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
	int ret;

	ret = clk_enable(spi_imx->clk_per);
	if (ret)
		return ret;

	ret = clk_enable(spi_imx->clk_ipg);
	if (ret) {
		clk_disable(spi_imx->clk_per);
		return ret;
	}

	return 0;
}

static int
spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	clk_disable(spi_imx->clk_ipg);
	clk_disable(spi_imx->clk_per);
	return 0;
}

static int spi_imx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(spi_imx_dt_ids, &pdev->dev);
	struct spi_imx_master *mxc_platform_info =
			dev_get_platdata(&pdev->dev);
	struct spi_master *master;
	struct spi_imx_data *spi_imx;
	struct resource *res;
	int i, ret, num_cs, irq;

	if (!np && !mxc_platform_info) {
		dev_err(&pdev->dev, "can't get the platform data\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(np, "fsl,spi-num-chipselects", &num_cs);
	if (ret < 0) {
		if (mxc_platform_info)
			num_cs = mxc_platform_info->num_chipselect;
		else
			return ret;
	}

	master = spi_alloc_master(&pdev->dev,
			sizeof(struct spi_imx_data) + sizeof(int) * num_cs);
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	master->bus_num = pdev->id;
	master->num_chipselect = num_cs;

	spi_imx = spi_master_get_devdata(master);
	spi_imx->bitbang.master = master;

	for (i = 0; i < master->num_chipselect; i++) {
		int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
		if (!gpio_is_valid(cs_gpio) && mxc_platform_info)
			cs_gpio = mxc_platform_info->chipselect[i];

		spi_imx->chipselect[i] = cs_gpio;
		if (!gpio_is_valid(cs_gpio))
			continue;

		ret = devm_gpio_request(&pdev->dev, spi_imx->chipselect[i],
					DRIVER_NAME);
		if (ret) {
			dev_err(&pdev->dev, "can't get cs gpios\n");
			goto out_master_put;
		}
	}

	spi_imx->bitbang.chipselect = spi_imx_chipselect;
	spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
	spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
	spi_imx->bitbang.master->setup = spi_imx_setup;
	spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
	spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
	spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
	spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	init_completion(&spi_imx->xfer_done);

	spi_imx->devtype_data = of_id ? of_id->data :
		(struct spi_imx_devtype_data *) pdev->id_entry->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(spi_imx->base)) {
		ret = PTR_ERR(spi_imx->base);
		goto out_master_put;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto out_master_put;
	}

	ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
			       dev_name(&pdev->dev), spi_imx);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
		goto out_master_put;
	}

	spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(spi_imx->clk_ipg)) {
		ret = PTR_ERR(spi_imx->clk_ipg);
		goto out_master_put;
	}

	spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(spi_imx->clk_per)) {
		ret = PTR_ERR(spi_imx->clk_per);
		goto out_master_put;
	}

	ret = clk_prepare_enable(spi_imx->clk_per);
	if (ret)
		goto out_master_put;

	ret = clk_prepare_enable(spi_imx->clk_ipg);
	if (ret)
		goto out_put_per;

	spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
	/*
	 * Only validated on i.mx6 now, can remove the constraint if
	 * validated on other chips.
	 */
	if (spi_imx->devtype_data == &imx51_ecspi_devtype_data
	    && spi_imx_sdma_init(&pdev->dev, spi_imx, master, res))
		dev_err(&pdev->dev, "dma setup error, using pio instead\n");

	spi_imx->devtype_data->reset(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, 0);

	master->dev.of_node = pdev->dev.of_node;
	ret = spi_bitbang_start(&spi_imx->bitbang);
	if (ret) {
		dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
		goto out_clk_put;
	}

	dev_info(&pdev->dev, "probed\n");

	clk_disable(spi_imx->clk_ipg);
	clk_disable(spi_imx->clk_per);
	return ret;

out_clk_put:
	clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
	clk_disable_unprepare(spi_imx->clk_per);
out_master_put:
	spi_master_put(master);

	return ret;
}

static int spi_imx_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	spi_bitbang_stop(&spi_imx->bitbang);

	writel(0, spi_imx->base + MXC_CSPICTRL);
	clk_unprepare(spi_imx->clk_ipg);
	clk_unprepare(spi_imx->clk_per);
	spi_imx_sdma_exit(spi_imx);
	spi_master_put(master);

	return 0;
}

static struct platform_driver spi_imx_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = spi_imx_dt_ids,
	},
	.id_table = spi_imx_devtype,
	.probe = spi_imx_probe,
	.remove = spi_imx_remove,
};
module_platform_driver(spi_imx_driver);

MODULE_DESCRIPTION("SPI Master Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);