/*
 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright (C) 2008 Juergen Beisert
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation
 * 51 Franklin Street, Fifth Floor
 * Boston, MA 02110-1301, USA.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>

#include <linux/platform_data/dma-imx.h>
#include <linux/platform_data/spi-imx.h>

#define DRIVER_NAME "spi_imx"

#define MXC_CSPIRXDATA		0x00
#define MXC_CSPITXDATA		0x04
#define MXC_CSPICTRL		0x08
#define MXC_CSPIINT		0x0c
#define MXC_RESET		0x1c

/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR	(1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE	(1 << 1) /* Transmit FIFO empty interrupt */

/* The maximum bytes that a sdma BD can transfer. */
#define MAX_SDMA_BD_BYTES	(1 << 15)
#define IMX_DMA_TIMEOUT		(msecs_to_jiffies(3000))

struct spi_imx_config {
	unsigned int speed_hz;
	unsigned int bpw;
	unsigned int mode;
	u8 cs;
};

enum spi_imx_devtype {
	IMX1_CSPI,
	IMX21_CSPI,
	IMX27_CSPI,
	IMX31_CSPI,
	IMX35_CSPI,	/* CSPI on all i.mx except above */
	IMX51_ECSPI,	/* ECSPI on i.mx51 and later */
};

struct spi_imx_data;

struct spi_imx_devtype_data {
	void (*intctrl)(struct spi_imx_data *, int);
	int (*config)(struct spi_imx_data *, struct spi_imx_config *);
	void (*trigger)(struct spi_imx_data *);
	int (*rx_available)(struct spi_imx_data *);
	void (*reset)(struct spi_imx_data *);
	enum spi_imx_devtype devtype;
};

struct spi_imx_data {
	struct spi_bitbang bitbang;

	struct completion xfer_done;
	void __iomem *base;
	struct clk *clk_per;
	struct clk *clk_ipg;
	unsigned long spi_clk;

	unsigned int count;
	void (*tx)(struct spi_imx_data *);
	void (*rx)(struct spi_imx_data *);
	void *rx_buf;
	const void *tx_buf;
	unsigned int txfifo; /* number of words pushed in tx FIFO */

	/* DMA */
	unsigned int dma_is_inited;
	unsigned int dma_finished;
	bool usedma;
	u32 rx_wml;
	u32 tx_wml;
	u32 rxt_wml;
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct spi_imx_devtype_data *devtype_data;
	int chipselect[0];
};

static inline int is_imx27_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX27_CSPI;
}

static inline int is_imx35_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX35_CSPI;
}

static inline unsigned spi_imx_get_fifosize(struct spi_imx_data *d)
{
	return (d->devtype_data->devtype == IMX51_ECSPI) ? 64 : 8;
}

#define MXC_SPI_BUF_RX(type)						\
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)	\
{									\
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);	\
									\
	if (spi_imx->rx_buf) {						\
		*(type *)spi_imx->rx_buf = val;				\
		spi_imx->rx_buf += sizeof(type);			\
	}								\
}

#define MXC_SPI_BUF_TX(type)						\
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)	\
{									\
	type val = 0;							\
									\
	if (spi_imx->tx_buf) {						\
		val = *(type *)spi_imx->tx_buf;				\
		spi_imx->tx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->count -= sizeof(type);					\
									\
	writel(val, spi_imx->base + MXC_CSPITXDATA);			\
}

MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)

/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
 * (which is currently not the case in this driver)
 */
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024};

/* MX21, MX27 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
		unsigned int fspi, unsigned int max)
{
	int i;

	for (i = 2; i < max; i++)
		if (fspi * mxc_clkdivs[i] >= fin)
			return i;

	return max;
}

/* MX1, MX31, MX35, MX51 CSPI */
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
		unsigned int fspi)
{
	int i, div = 4;

	for (i = 0; i < 7; i++) {
		if (fspi * div >= fin)
			return i;
		div <<= 1;
	}

	return 7;
}
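
/*
 * Illustrative traces of the two divider helpers above (the frequencies are
 * made-up example values; the arithmetic simply follows the code):
 *
 *   spi_imx_clkdiv_1(): with fin = 48 MHz and fspi = 2 MHz the first index
 *   satisfying fspi * mxc_clkdivs[i] >= fin is i = 7 (mxc_clkdivs[7] = 24),
 *   so 7 is written to the DR field and SCLK ends up at 48 MHz / 24 = 2 MHz.
 *
 *   spi_imx_clkdiv_2(): the divider is 4 << i. With fin = 48 MHz and
 *   fspi = 8 MHz, i = 0 gives 32 MHz < 48 MHz, i = 1 gives 64 MHz >= 48 MHz,
 *   so 1 is returned (divide by 8) and SCLK ends up at 6 MHz.
 */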

static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	if (spi_imx->dma_is_inited
	    && transfer->len > spi_imx->rx_wml * sizeof(u32)
	    && transfer->len > spi_imx->tx_wml * sizeof(u32))
		return true;
	return false;
}

#define MX51_ECSPI_CTRL		0x08
#define MX51_ECSPI_CTRL_ENABLE		(1 << 0)
#define MX51_ECSPI_CTRL_XCH		(1 << 2)
#define MX51_ECSPI_CTRL_SMC		(1 << 3)
#define MX51_ECSPI_CTRL_MODE_MASK	(0xf << 4)
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET	8
#define MX51_ECSPI_CTRL_PREDIV_OFFSET	12
#define MX51_ECSPI_CTRL_CS(cs)		((cs) << 18)
#define MX51_ECSPI_CTRL_BL_OFFSET	20

#define MX51_ECSPI_CONFIG	0x0c
#define MX51_ECSPI_CONFIG_SCLKPHA(cs)	(1 << ((cs) + 0))
#define MX51_ECSPI_CONFIG_SCLKPOL(cs)	(1 << ((cs) + 4))
#define MX51_ECSPI_CONFIG_SBBCTRL(cs)	(1 << ((cs) + 8))
#define MX51_ECSPI_CONFIG_SSBPOL(cs)	(1 << ((cs) + 12))
#define MX51_ECSPI_CONFIG_SCLKCTL(cs)	(1 << ((cs) + 20))

#define MX51_ECSPI_INT		0x10
#define MX51_ECSPI_INT_TEEN		(1 << 0)
#define MX51_ECSPI_INT_RREN		(1 << 3)

#define MX51_ECSPI_DMA		0x14
#define MX51_ECSPI_DMA_TX_WML_OFFSET	0
#define MX51_ECSPI_DMA_TX_WML_MASK	0x3F
#define MX51_ECSPI_DMA_RX_WML_OFFSET	16
#define MX51_ECSPI_DMA_RX_WML_MASK	(0x3F << 16)
#define MX51_ECSPI_DMA_RXT_WML_OFFSET	24
#define MX51_ECSPI_DMA_RXT_WML_MASK	(0x3F << 24)

#define MX51_ECSPI_DMA_TEDEN_OFFSET	7
#define MX51_ECSPI_DMA_RXDEN_OFFSET	23
#define MX51_ECSPI_DMA_RXTDEN_OFFSET	31

#define MX51_ECSPI_STAT		0x18
#define MX51_ECSPI_STAT_RR		(1 << 3)

/* MX51 eCSPI */
static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi,
				      unsigned int *fres)
{
	/*
	 * there are two 4-bit dividers, the pre-divider divides by
	 * $pre, the post-divider by 2^$post
	 */
	unsigned int pre, post;

	if (unlikely(fspi > fin))
		return 0;

	post = fls(fin) - fls(fspi);
	if (fin > fspi << post)
		post++;

	/* now we have: (fin <= fspi << post) with post being minimal */

	post = max(4U, post) - 4;
	if (unlikely(post > 0xf)) {
		pr_err("%s: cannot set clock freq: %u (base freq: %u)\n",
				__func__, fspi, fin);
		return 0xff;
	}

	pre = DIV_ROUND_UP(fin, fspi << post) - 1;

	pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
			__func__, fin, fspi, post, pre);

	/* Resulting frequency for the SCLK line. */
	*fres = (fin / (pre + 1)) >> post;

	return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
		(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
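
/*
 * Worked example for the divider search above (a plain trace of the code,
 * assuming an illustrative 48 MHz reference clock and a 1 MHz transfer speed):
 *
 *   post = fls(48 MHz) - fls(1 MHz) = 26 - 20 = 6; 48 MHz <= 1 MHz << 6,
 *   so it is not bumped, and post = max(4, 6) - 4 = 2.
 *   pre  = DIV_ROUND_UP(48 MHz, 1 MHz << 2) - 1 = 11.
 *
 * The returned register value is (11 << PREDIV_OFFSET) | (2 << POSTDIV_OFFSET),
 * i.e. SCLK = 48 MHz / (11 + 1) / 2^2 = 1 MHz, which is also what *fres
 * reports back to the caller.
 */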

static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned val = 0;

	if (enable & MXC_INT_TE)
		val |= MX51_ECSPI_INT_TEEN;

	if (enable & MXC_INT_RR)
		val |= MX51_ECSPI_INT_RREN;

	writel(val, spi_imx->base + MX51_ECSPI_INT);
}

static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
	u32 reg = readl(spi_imx->base + MX51_ECSPI_CTRL);

	if (!spi_imx->usedma)
		reg |= MX51_ECSPI_CTRL_XCH;
	else if (!spi_imx->dma_finished)
		reg |= MX51_ECSPI_CTRL_SMC;
	else
		reg &= ~MX51_ECSPI_CTRL_SMC;
	writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}

static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0, dma = 0;
	u32 tx_wml_cfg, rx_wml_cfg, rxt_wml_cfg;
	u32 clk = config->speed_hz, delay;

	/*
	 * The hardware seems to have a race condition when changing modes. The
	 * current assumption is that the selection of the channel arrives
	 * earlier in the hardware than the mode bits when they are written at
	 * the same time.
	 * So set master mode for all channels as we do not support slave mode.
	 */
	ctrl |= MX51_ECSPI_CTRL_MODE_MASK;

	/* set clock speed */
	ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz, &clk);

	/* set chip select to use */
	ctrl |= MX51_ECSPI_CTRL_CS(config->cs);

	ctrl |= (config->bpw - 1) << MX51_ECSPI_CTRL_BL_OFFSET;

	cfg |= MX51_ECSPI_CONFIG_SBBCTRL(config->cs);

	if (config->mode & SPI_CPHA)
		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(config->cs);

	if (config->mode & SPI_CPOL) {
		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
	} else {
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
		cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
	}
	if (config->mode & SPI_CS_HIGH)
		cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(config->cs);

	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);

	/*
	 * Wait until the changes in the configuration register CONFIGREG
	 * propagate into the hardware. It takes exactly one tick of the
	 * SCLK clock, but we will wait two SCLK cycles just to be sure. The
	 * effect of the delay it takes for the hardware to apply changes
	 * is noticeable if the SCLK clock runs very slowly. In such a case, if
	 * the polarity of SCLK should be inverted, the GPIO ChipSelect might
	 * be asserted before the SCLK polarity changes, which would disrupt
	 * the SPI communication as the device on the other end would consider
	 * the change of SCLK polarity as a clock tick already.
	 */
	delay = (2 * 1000000) / clk;
	if (likely(delay < 10))	/* SCLK is faster than 100 kHz */
		udelay(delay);
	else			/* SCLK is _very_ slow */
		usleep_range(delay, delay + 10);
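
	/*
	 * Two concrete cases of the calculation above: at 1 MHz SCLK the
	 * delay is 2 us and udelay() is used; at 10 kHz it is 200 us, which
	 * goes through usleep_range() instead.
	 */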

	/*
	 * Configure the DMA register: setup the watermark
	 * and enable DMA request.
	 */
	if (spi_imx->dma_is_inited) {
		dma = readl(spi_imx->base + MX51_ECSPI_DMA);

		spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2;
		rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
		tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
		rxt_wml_cfg = spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET;
		dma = (dma & ~MX51_ECSPI_DMA_TX_WML_MASK
			   & ~MX51_ECSPI_DMA_RX_WML_MASK
			   & ~MX51_ECSPI_DMA_RXT_WML_MASK)
			   | rx_wml_cfg | tx_wml_cfg | rxt_wml_cfg
			   | (1 << MX51_ECSPI_DMA_TEDEN_OFFSET)
			   | (1 << MX51_ECSPI_DMA_RXDEN_OFFSET)
			   | (1 << MX51_ECSPI_DMA_RXTDEN_OFFSET);

		writel(dma, spi_imx->base + MX51_ECSPI_DMA);
	}

	return 0;
}
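
/*
 * Note on the watermarks used above: with the 64-word eCSPI FIFO the RX, TX
 * and RXT watermarks all end up at 32 words (spi_imx_sdma_init() picks the
 * same value), so spi_imx_can_dma() only routes transfers strictly longer
 * than 32 * sizeof(u32) = 128 bytes to the DMA path; anything shorter is
 * done in PIO mode.
 */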

static int __maybe_unused mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
}

static void __maybe_unused mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (mx51_ecspi_rx_available(spi_imx))
		readl(spi_imx->base + MXC_CSPIRXDATA);
}

#define MX31_INTREG_TEEN	(1 << 0)
#define MX31_INTREG_RREN	(1 << 3)

#define MX31_CSPICTRL_ENABLE	(1 << 0)
#define MX31_CSPICTRL_MASTER	(1 << 1)
#define MX31_CSPICTRL_XCH	(1 << 2)
#define MX31_CSPICTRL_POL	(1 << 4)
#define MX31_CSPICTRL_PHA	(1 << 5)
#define MX31_CSPICTRL_SSCTL	(1 << 6)
#define MX31_CSPICTRL_SSPOL	(1 << 7)
#define MX31_CSPICTRL_BC_SHIFT	8
#define MX35_CSPICTRL_BL_SHIFT	20
#define MX31_CSPICTRL_CS_SHIFT	24
#define MX35_CSPICTRL_CS_SHIFT	12
#define MX31_CSPICTRL_DR_SHIFT	16

#define MX31_CSPISTATUS		0x14
#define MX31_STATUS_RR		(1 << 3)

/* These functions also work for the i.MX35, but be aware that
 * the i.MX35 has a slightly different register layout for bits
 * we do not use here.
 */
static void __maybe_unused mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX31_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX31_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX31_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int __maybe_unused mx31_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
	int cs = spi_imx->chipselect[config->cs];

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
		MX31_CSPICTRL_DR_SHIFT;

	if (is_imx35_cspi(spi_imx)) {
		reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
		reg |= MX31_CSPICTRL_SSCTL;
	} else {
		reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
	}

	if (config->mode & SPI_CPHA)
		reg |= MX31_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX31_CSPICTRL_POL;
	if (config->mode & SPI_CS_HIGH)
		reg |= MX31_CSPICTRL_SSPOL;
	if (cs < 0)
		reg |= (cs + 32) <<
			(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
						  MX31_CSPICTRL_CS_SHIFT);

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
}

static void __maybe_unused mx31_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
		readl(spi_imx->base + MXC_CSPIRXDATA);
}
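
/*
 * A note on the "cs < 0" tests in mx31_config() above and mx21_config()
 * below: entries in spi_imx->chipselect[] are either valid GPIO numbers
 * (toggled by spi_imx_chipselect()) or negative values standing for one of
 * the controller's native chip selects; the i.MX platform data historically
 * passes native chip select <n> as (<n> - 32), which is why (cs + 32) is
 * written back into the CS field of the control register.
 */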

#define MX21_INTREG_RR		(1 << 4)
#define MX21_INTREG_TEEN	(1 << 9)
#define MX21_INTREG_RREN	(1 << 13)

#define MX21_CSPICTRL_POL	(1 << 5)
#define MX21_CSPICTRL_PHA	(1 << 6)
#define MX21_CSPICTRL_SSPOL	(1 << 8)
#define MX21_CSPICTRL_XCH	(1 << 9)
#define MX21_CSPICTRL_ENABLE	(1 << 10)
#define MX21_CSPICTRL_MASTER	(1 << 11)
#define MX21_CSPICTRL_DR_SHIFT	14
#define MX21_CSPICTRL_CS_SHIFT	19

static void __maybe_unused mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX21_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX21_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void __maybe_unused mx21_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX21_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int __maybe_unused mx21_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
	int cs = spi_imx->chipselect[config->cs];
	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;

	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) <<
		MX21_CSPICTRL_DR_SHIFT;
	reg |= config->bpw - 1;

	if (config->mode & SPI_CPHA)
		reg |= MX21_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX21_CSPICTRL_POL;
	if (config->mode & SPI_CS_HIGH)
		reg |= MX21_CSPICTRL_SSPOL;
	if (cs < 0)
		reg |= (cs + 32) << MX21_CSPICTRL_CS_SHIFT;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int __maybe_unused mx21_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
}

static void __maybe_unused mx21_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}

#define MX1_INTREG_RR		(1 << 3)
#define MX1_INTREG_TEEN		(1 << 8)
#define MX1_INTREG_RREN		(1 << 11)

#define MX1_CSPICTRL_POL	(1 << 4)
#define MX1_CSPICTRL_PHA	(1 << 5)
#define MX1_CSPICTRL_XCH	(1 << 8)
#define MX1_CSPICTRL_ENABLE	(1 << 9)
#define MX1_CSPICTRL_MASTER	(1 << 10)
#define MX1_CSPICTRL_DR_SHIFT	13

static void __maybe_unused mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX1_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX1_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void __maybe_unused mx1_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX1_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int __maybe_unused mx1_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
		MX1_CSPICTRL_DR_SHIFT;
	reg |= config->bpw - 1;

	if (config->mode & SPI_CPHA)
		reg |= MX1_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX1_CSPICTRL_POL;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int __maybe_unused mx1_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
}

static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}

static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
	.intctrl = mx1_intctrl,
	.config = mx1_config,
	.trigger = mx1_trigger,
	.rx_available = mx1_rx_available,
	.reset = mx1_reset,
	.devtype = IMX1_CSPI,
};

static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
	.intctrl = mx21_intctrl,
	.config = mx21_config,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.devtype = IMX21_CSPI,
};

static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
	/* the i.mx27 cspi shares its functions with the i.mx21 one */
	.intctrl = mx21_intctrl,
	.config = mx21_config,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.devtype = IMX27_CSPI,
};

static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
	.intctrl = mx31_intctrl,
	.config = mx31_config,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.devtype = IMX31_CSPI,
};

static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
	/* the i.mx35 and later cspi shares its functions with the i.mx31 one */
	.intctrl = mx31_intctrl,
	.config = mx31_config,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.devtype = IMX35_CSPI,
};

static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.config = mx51_ecspi_config,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.devtype = IMX51_ECSPI,
};

static const struct platform_device_id spi_imx_devtype[] = {
	{
		.name = "imx1-cspi",
		.driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data,
	}, {
		.name = "imx21-cspi",
		.driver_data = (kernel_ulong_t) &imx21_cspi_devtype_data,
	}, {
		.name = "imx27-cspi",
		.driver_data = (kernel_ulong_t) &imx27_cspi_devtype_data,
	}, {
		.name = "imx31-cspi",
		.driver_data = (kernel_ulong_t) &imx31_cspi_devtype_data,
	}, {
		.name = "imx35-cspi",
		.driver_data = (kernel_ulong_t) &imx35_cspi_devtype_data,
	}, {
		.name = "imx51-ecspi",
		.driver_data = (kernel_ulong_t) &imx51_ecspi_devtype_data,
	}, {
		/* sentinel */
	}
};

static const struct of_device_id spi_imx_dt_ids[] = {
	{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
	{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
	{ .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
	{ .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
	{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
	{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);

static void spi_imx_chipselect(struct spi_device *spi, int is_active)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	int gpio = spi_imx->chipselect[spi->chip_select];
	int active = is_active != BITBANG_CS_INACTIVE;
	int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);

	if (!gpio_is_valid(gpio))
		return;

	gpio_set_value(gpio, dev_is_lowactive ^ active);
}
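
/*
 * Interrupt driven PIO flow, implemented by spi_imx_push() and spi_imx_isr()
 * below: txfifo counts the words currently in flight. The TE interrupt
 * refills the FIFO as long as spi_imx->count is non-zero; once everything
 * has been pushed the driver switches to the RR interrupt and keeps draining
 * the receive FIFO until txfifo reaches zero, at which point all interrupts
 * are masked and xfer_done is completed.
 */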

static void spi_imx_push(struct spi_imx_data *spi_imx)
{
	while (spi_imx->txfifo < spi_imx_get_fifosize(spi_imx)) {
		if (!spi_imx->count)
			break;
		spi_imx->tx(spi_imx);
		spi_imx->txfifo++;
	}

	spi_imx->devtype_data->trigger(spi_imx);
}

static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
	struct spi_imx_data *spi_imx = dev_id;

	while (spi_imx->devtype_data->rx_available(spi_imx)) {
		spi_imx->rx(spi_imx);
		spi_imx->txfifo--;
	}

	if (spi_imx->count) {
		spi_imx_push(spi_imx);
		return IRQ_HANDLED;
	}

	if (spi_imx->txfifo) {
		/* No data left to push, but still waiting for rx data,
		 * enable receive data available interrupt.
		 */
		spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_RR);
		return IRQ_HANDLED;
	}

	spi_imx->devtype_data->intctrl(spi_imx, 0);
	complete(&spi_imx->xfer_done);

	return IRQ_HANDLED;
}

static int spi_imx_setupxfer(struct spi_device *spi,
				 struct spi_transfer *t)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	struct spi_imx_config config;

	config.bpw = t ? t->bits_per_word : spi->bits_per_word;
	config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
	config.mode = spi->mode;
	config.cs = spi->chip_select;

	if (!config.speed_hz)
		config.speed_hz = spi->max_speed_hz;
	if (!config.bpw)
		config.bpw = spi->bits_per_word;

	/* Initialize the functions for transfer */
	if (config.bpw <= 8) {
		spi_imx->rx = spi_imx_buf_rx_u8;
		spi_imx->tx = spi_imx_buf_tx_u8;
	} else if (config.bpw <= 16) {
		spi_imx->rx = spi_imx_buf_rx_u16;
		spi_imx->tx = spi_imx_buf_tx_u16;
	} else {
		spi_imx->rx = spi_imx_buf_rx_u32;
		spi_imx->tx = spi_imx_buf_tx_u32;
	}

	spi_imx->devtype_data->config(spi_imx, &config);

	return 0;
}

static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
{
	struct spi_master *master = spi_imx->bitbang.master;

	if (master->dma_rx) {
		dma_release_channel(master->dma_rx);
		master->dma_rx = NULL;
	}

	if (master->dma_tx) {
		dma_release_channel(master->dma_tx);
		master->dma_tx = NULL;
	}

	spi_imx->dma_is_inited = 0;
}

static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
			     struct spi_master *master,
			     const struct resource *res)
{
	struct dma_slave_config slave_config = {};
	int ret;

	/* use pio mode for i.mx6dl chip TKT238285 */
	if (of_machine_is_compatible("fsl,imx6dl"))
		return 0;

	/* Prepare for TX DMA: */
	master->dma_tx = dma_request_slave_channel(dev, "tx");
	if (!master->dma_tx) {
		dev_err(dev, "cannot get the TX DMA channel!\n");
		ret = -EINVAL;
		goto err;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	slave_config.dst_addr = res->start + MXC_CSPITXDATA;
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.dst_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
	ret = dmaengine_slave_config(master->dma_tx, &slave_config);
	if (ret) {
		dev_err(dev, "error in TX dma configuration.\n");
		goto err;
	}

	/* Prepare for RX: */
	master->dma_rx = dma_request_slave_channel(dev, "rx");
	if (!master->dma_rx) {
		dev_dbg(dev, "cannot get the DMA channel.\n");
		ret = -EINVAL;
		goto err;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	slave_config.src_addr = res->start + MXC_CSPIRXDATA;
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.src_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
	ret = dmaengine_slave_config(master->dma_rx, &slave_config);
	if (ret) {
		dev_err(dev, "error in RX dma configuration.\n");
		goto err;
	}

	init_completion(&spi_imx->dma_rx_completion);
	init_completion(&spi_imx->dma_tx_completion);
	master->can_dma = spi_imx_can_dma;
	master->max_dma_len = MAX_SDMA_BD_BYTES;
	spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
					 SPI_MASTER_MUST_TX;
	spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
	spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
	spi_imx->dma_is_inited = 1;

	return 0;
err:
	spi_imx_sdma_exit(spi_imx);
	return ret;
}

static void spi_imx_dma_rx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

	complete(&spi_imx->dma_rx_completion);
}

static void spi_imx_dma_tx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

	complete(&spi_imx->dma_tx_completion);
}
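
/*
 * DMA transfer path (see spi_imx_dma_transfer() below): the transfer's
 * scatterlists are mapped to slave descriptors, both channels are submitted,
 * the eCSPI is switched to SMC mode via the trigger hook so the transfer
 * starts as soon as data reaches the TX FIFO, and the RXT watermark is
 * temporarily lowered to the tail length so that the last partial burst of a
 * transfer still raises a DMA request. Completion is signalled from the
 * dmaengine callbacks and bounded by IMX_DMA_TIMEOUT.
 */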

static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
				struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
	int ret;
	unsigned long timeout;
	u32 dma;
	int left;
	struct spi_master *master = spi_imx->bitbang.master;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;

	if (tx) {
		desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_tx)
			goto no_dma;

		desc_tx->callback = spi_imx_dma_tx_callback;
		desc_tx->callback_param = (void *)spi_imx;
		dmaengine_submit(desc_tx);
	}

	if (rx) {
		desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_rx)
			goto no_dma;

		desc_rx->callback = spi_imx_dma_rx_callback;
		desc_rx->callback_param = (void *)spi_imx;
		dmaengine_submit(desc_rx);
	}

	reinit_completion(&spi_imx->dma_rx_completion);
	reinit_completion(&spi_imx->dma_tx_completion);

	/* Trigger the cspi module. */
	spi_imx->dma_finished = 0;

	dma = readl(spi_imx->base + MX51_ECSPI_DMA);
	dma = dma & (~MX51_ECSPI_DMA_RXT_WML_MASK);
	/* Lower the RXT watermark so DMA also fetches the tail of the transfer */
	left = transfer->len % spi_imx->rxt_wml;
	if (left)
		writel(dma | (left << MX51_ECSPI_DMA_RXT_WML_OFFSET),
				spi_imx->base + MX51_ECSPI_DMA);
	spi_imx->devtype_data->trigger(spi_imx);

	dma_async_issue_pending(master->dma_tx);
	dma_async_issue_pending(master->dma_rx);
	/* Wait for SDMA to finish the data transfer. */
	timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
						IMX_DMA_TIMEOUT);
	if (!timeout) {
		pr_warn("%s %s: I/O Error in DMA TX\n",
			dev_driver_string(&master->dev),
			dev_name(&master->dev));
		dmaengine_terminate_all(master->dma_tx);
	} else {
		timeout = wait_for_completion_timeout(
				&spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
		if (!timeout) {
			pr_warn("%s %s: I/O Error in DMA RX\n",
				dev_driver_string(&master->dev),
				dev_name(&master->dev));
			spi_imx->devtype_data->reset(spi_imx);
			dmaengine_terminate_all(master->dma_rx);
		}
		writel(dma |
			spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET,
			spi_imx->base + MX51_ECSPI_DMA);
	}

	spi_imx->dma_finished = 1;
	spi_imx->devtype_data->trigger(spi_imx);

	if (!timeout)
		ret = -ETIMEDOUT;
	else
		ret = transfer->len;

	return ret;

no_dma:
	pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
		     dev_driver_string(&master->dev),
		     dev_name(&master->dev));
	return -EAGAIN;
}

static int spi_imx_pio_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

	spi_imx->tx_buf = transfer->tx_buf;
	spi_imx->rx_buf = transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;

	reinit_completion(&spi_imx->xfer_done);

	spi_imx_push(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);

	wait_for_completion(&spi_imx->xfer_done);

	return transfer->len;
}
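
/*
 * spi_imx_transfer() below prefers the DMA path whenever spi_imx_can_dma()
 * accepts the transfer; if descriptor preparation fails the DMA path returns
 * -EAGAIN and the same transfer is retried in PIO mode.
 */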

static int spi_imx_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	int ret;
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

	if (spi_imx->bitbang.master->can_dma &&
	    spi_imx_can_dma(spi_imx->bitbang.master, spi, transfer)) {
		spi_imx->usedma = true;
		ret = spi_imx_dma_transfer(spi_imx, transfer);
		if (ret != -EAGAIN)
			return ret;
	}
	spi_imx->usedma = false;

	return spi_imx_pio_transfer(spi, transfer);
}

static int spi_imx_setup(struct spi_device *spi)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	int gpio = spi_imx->chipselect[spi->chip_select];

	dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
		 spi->mode, spi->bits_per_word, spi->max_speed_hz);

	if (gpio_is_valid(gpio))
		gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);

	spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);

	return 0;
}

static void spi_imx_cleanup(struct spi_device *spi)
{
}

static int
spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
	int ret;

	ret = clk_enable(spi_imx->clk_per);
	if (ret)
		return ret;

	ret = clk_enable(spi_imx->clk_ipg);
	if (ret) {
		clk_disable(spi_imx->clk_per);
		return ret;
	}

	return 0;
}

static int
spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	clk_disable(spi_imx->clk_ipg);
	clk_disable(spi_imx->clk_per);
	return 0;
}

static int spi_imx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(spi_imx_dt_ids, &pdev->dev);
	struct spi_imx_master *mxc_platform_info =
			dev_get_platdata(&pdev->dev);
	struct spi_master *master;
	struct spi_imx_data *spi_imx;
	struct resource *res;
	int i, ret, num_cs, irq;

	if (!np && !mxc_platform_info) {
		dev_err(&pdev->dev, "can't get the platform data\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(np, "fsl,spi-num-chipselects", &num_cs);
	if (ret < 0) {
		if (mxc_platform_info)
			num_cs = mxc_platform_info->num_chipselect;
		else
			return ret;
	}

	master = spi_alloc_master(&pdev->dev,
			sizeof(struct spi_imx_data) + sizeof(int) * num_cs);
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	master->bus_num = pdev->id;
	master->num_chipselect = num_cs;

	spi_imx = spi_master_get_devdata(master);
	spi_imx->bitbang.master = master;

	for (i = 0; i < master->num_chipselect; i++) {
		int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
		if (!gpio_is_valid(cs_gpio) && mxc_platform_info)
			cs_gpio = mxc_platform_info->chipselect[i];

		spi_imx->chipselect[i] = cs_gpio;
		if (!gpio_is_valid(cs_gpio))
			continue;

		ret = devm_gpio_request(&pdev->dev, spi_imx->chipselect[i],
					DRIVER_NAME);
		if (ret) {
			dev_err(&pdev->dev, "can't get cs gpios\n");
			goto out_master_put;
		}
	}

	spi_imx->bitbang.chipselect = spi_imx_chipselect;
	spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
	spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
	spi_imx->bitbang.master->setup = spi_imx_setup;
	spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
	spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
	spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
	spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	init_completion(&spi_imx->xfer_done);

	spi_imx->devtype_data = of_id ? of_id->data :
		(struct spi_imx_devtype_data *) pdev->id_entry->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(spi_imx->base)) {
		ret = PTR_ERR(spi_imx->base);
		goto out_master_put;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto out_master_put;
	}

	ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
			       dev_name(&pdev->dev), spi_imx);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
		goto out_master_put;
	}

	spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(spi_imx->clk_ipg)) {
		ret = PTR_ERR(spi_imx->clk_ipg);
		goto out_master_put;
	}

	spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(spi_imx->clk_per)) {
		ret = PTR_ERR(spi_imx->clk_per);
		goto out_master_put;
	}

	ret = clk_prepare_enable(spi_imx->clk_per);
	if (ret)
		goto out_master_put;

	ret = clk_prepare_enable(spi_imx->clk_ipg);
	if (ret)
		goto out_put_per;

	spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
	/*
	 * Only validated on i.mx6 now, can remove the constraint if validated on
	 * other chips.
	 */
	if (spi_imx->devtype_data == &imx51_ecspi_devtype_data
	    && spi_imx_sdma_init(&pdev->dev, spi_imx, master, res))
		dev_err(&pdev->dev, "dma setup error, use pio instead\n");

	spi_imx->devtype_data->reset(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, 0);

	master->dev.of_node = pdev->dev.of_node;
	ret = spi_bitbang_start(&spi_imx->bitbang);
	if (ret) {
		dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
		goto out_clk_put;
	}

	dev_info(&pdev->dev, "probed\n");

	clk_disable(spi_imx->clk_ipg);
	clk_disable(spi_imx->clk_per);
	return ret;

out_clk_put:
	clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
	clk_disable_unprepare(spi_imx->clk_per);
out_master_put:
	spi_master_put(master);

	return ret;
}

static int spi_imx_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	spi_bitbang_stop(&spi_imx->bitbang);

	writel(0, spi_imx->base + MXC_CSPICTRL);
	clk_unprepare(spi_imx->clk_ipg);
	clk_unprepare(spi_imx->clk_per);
	spi_imx_sdma_exit(spi_imx);
	spi_master_put(master);

	return 0;
}

static struct platform_driver spi_imx_driver = {
	.driver = {
		   .name = DRIVER_NAME,
		   .of_match_table = spi_imx_dt_ids,
		   },
	.id_table = spi_imx_devtype,
	.probe = spi_imx_probe,
	.remove = spi_imx_remove,
};
module_platform_driver(spi_imx_driver);

MODULE_DESCRIPTION("SPI Master Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);